author     zhangguanzhang <guanzhangzhang@gmail.com>  2020-06-12 19:54:10 +0800
committer  GitHub <noreply@github.com>                2020-06-12 19:54:10 +0800
commit     64eae15aa7905dd49f4a348f6b4765dfb4d9dd91 (patch)
tree       2914c20a2e0e15383da50334cad89b9b85b209a1
parent     3218736cff4b718b8fe855759687cb66f19d6e1e (diff)
parent     8aa5cf3d45998bc92eaafd67ab2a59e3722bade4 (diff)
download   podman-64eae15aa7905dd49f4a348f6b4765dfb4d9dd91.tar.gz
           podman-64eae15aa7905dd49f4a348f6b4765dfb4d9dd91.tar.bz2
           podman-64eae15aa7905dd49f4a348f6b4765dfb4d9dd91.zip
Merge pull request #1 from containers/master
# sync
-rw-r--r--  .cirrus.yml | 58
-rw-r--r--  .golangci.yml | 2
-rw-r--r--  Makefile | 28
-rw-r--r--  changelog.txt | 27
-rw-r--r--  cmd/podman/common/create.go | 5
-rw-r--r--  cmd/podman/common/create_opts.go | 1
-rw-r--r--  cmd/podman/common/default.go | 2
-rw-r--r--  cmd/podman/common/specgen.go | 13
-rw-r--r--  cmd/podman/common/util.go | 57
-rw-r--r--  cmd/podman/containers/attach.go | 4
-rw-r--r--  cmd/podman/containers/container.go | 2
-rw-r--r--  cmd/podman/containers/diff.go | 2
-rw-r--r--  cmd/podman/containers/exec.go | 8
-rw-r--r--  cmd/podman/containers/ps.go | 2
-rw-r--r--  cmd/podman/containers/stats.go | 10
-rw-r--r--  cmd/podman/containers/stop.go | 3
-rw-r--r--  cmd/podman/containers/wait.go | 4
-rw-r--r--  cmd/podman/diff.go | 2
-rw-r--r--  cmd/podman/images/image.go | 2
-rw-r--r--  cmd/podman/images/list.go | 4
-rw-r--r--  cmd/podman/networks/list.go | 14
-rw-r--r--  cmd/podman/parse/common.go | 55
-rw-r--r--  cmd/podman/play/kube.go | 34
-rw-r--r--  cmd/podman/pods/create.go | 14
-rw-r--r--  cmd/podman/pods/pod.go | 2
-rw-r--r--  cmd/podman/pods/ps.go | 10
-rw-r--r--  cmd/podman/pods/rm.go | 25
-rw-r--r--  cmd/podman/pods/start.go | 22
-rw-r--r--  cmd/podman/pods/stats.go | 4
-rw-r--r--  cmd/podman/pods/stop.go | 35
-rw-r--r--  cmd/podman/registry/json.go | 4
-rw-r--r--  cmd/podman/report/report.go | 2
-rw-r--r--  cmd/podman/root.go | 8
-rw-r--r--  cmd/podman/system/df.go | 6
-rw-r--r--  cmd/podman/system/events.go | 6
-rw-r--r--  cmd/podman/system/service.go | 4
-rw-r--r--  cmd/podman/system/system.go | 2
-rw-r--r--  cmd/podman/validate/args.go | 4
-rw-r--r--  cmd/podman/validate/choice.go | 16
-rw-r--r--  cmd/podman/volumes/create.go | 2
-rw-r--r--  cmd/podman/volumes/volume.go | 2
-rw-r--r--  completions/bash/podman | 7
-rw-r--r--  contrib/cirrus/README.md | 62
-rwxr-xr-x  contrib/cirrus/check_image.sh | 42
-rwxr-xr-x  contrib/cirrus/integration_test.sh | 15
-rw-r--r--  contrib/cirrus/lib.sh | 13
-rw-r--r--  contrib/cirrus/packer/fedora_packaging.sh | 85
-rw-r--r--  contrib/cirrus/packer/fedora_setup.sh | 8
-rw-r--r--  contrib/cirrus/packer/ubuntu_packaging.sh | 31
-rwxr-xr-x  contrib/cirrus/rootless_test.sh | 22
-rwxr-xr-x  contrib/cirrus/setup_environment.sh | 19
-rw-r--r--  contrib/cirrus/swagger_stack_trace.png | bin 0 -> 42799 bytes
-rw-r--r--  contrib/spec/podman.spec.in | 3
-rw-r--r--  docs/Readme.md | 30
-rw-r--r--  docs/source/markdown/podman-create.1.md | 4
-rw-r--r--  docs/source/markdown/podman-events.1.md | 2
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md | 5
-rw-r--r--  docs/source/markdown/podman-login.1.md | 4
-rw-r--r--  docs/source/markdown/podman-pod-create.1.md | 4
-rw-r--r--  docs/source/markdown/podman-pod-rm.1.md | 6
-rw-r--r--  docs/source/markdown/podman-pod-start.1.md | 5
-rw-r--r--  docs/source/markdown/podman-pod-stop.1.md | 11
-rw-r--r--  docs/source/markdown/podman-run.1.md | 4
-rw-r--r--  go.mod | 6
-rw-r--r--  go.sum | 12
-rwxr-xr-x  hack/install_bats.sh | 16
-rw-r--r--  libpod/container_internal.go | 24
-rw-r--r--  libpod/container_internal_linux.go | 19
-rw-r--r--  libpod/container_log.go | 4
-rw-r--r--  libpod/define/errors.go | 3
-rw-r--r--  libpod/define/pod_inspect.go | 3
-rw-r--r--  libpod/filters/pods.go | 8
-rw-r--r--  libpod/image/filters.go | 6
-rw-r--r--  libpod/oci.go | 7
-rw-r--r--  libpod/oci_conmon_linux.go | 25
-rw-r--r--  libpod/oci_missing.go | 5
-rw-r--r--  libpod/oci_util.go | 36
-rw-r--r--  libpod/options.go | 24
-rw-r--r--  libpod/pod.go | 25
-rw-r--r--  libpod/pod_api.go | 1
-rw-r--r--  libpod/runtime_ctr.go | 6
-rw-r--r--  libpod/runtime_pod_infra_linux.go | 3
-rw-r--r--  pkg/api/handlers/compat/containers.go | 4
-rw-r--r--  pkg/api/handlers/compat/images.go | 4
-rw-r--r--  pkg/api/handlers/compat/images_build.go | 8
-rw-r--r--  pkg/api/handlers/compat/info.go | 2
-rw-r--r--  pkg/api/handlers/compat/networks.go | 4
-rw-r--r--  pkg/api/handlers/compat/ping.go | 4
-rw-r--r--  pkg/api/handlers/compat/version.go | 4
-rw-r--r--  pkg/api/handlers/decoder.go | 6
-rw-r--r--  pkg/api/handlers/libpod/pods.go | 9
-rw-r--r--  pkg/api/handlers/libpod/volumes.go | 2
-rw-r--r--  pkg/api/handlers/utils/containers.go | 2
-rw-r--r--  pkg/api/handlers/utils/handler.go | 24
-rw-r--r--  pkg/api/handlers/utils/handler_test.go | 4
-rw-r--r--  pkg/api/handlers/utils/pods.go | 4
-rw-r--r--  pkg/api/server/docs.go | 25
-rw-r--r--  pkg/api/server/handler_api.go | 6
-rw-r--r--  pkg/api/server/server.go | 2
-rw-r--r--  pkg/bindings/bindings.go | 3
-rw-r--r--  pkg/bindings/connection.go | 8
-rw-r--r--  pkg/bindings/containers/attach.go | 16
-rw-r--r--  pkg/bindings/containers/checkpoint.go | 12
-rw-r--r--  pkg/bindings/containers/commit.go | 6
-rw-r--r--  pkg/bindings/containers/diff.go | 4
-rw-r--r--  pkg/bindings/generate/generate.go | 2
-rw-r--r--  pkg/bindings/images/diff.go | 4
-rw-r--r--  pkg/bindings/images/images.go | 4
-rw-r--r--  pkg/bindings/play/play.go | 2
-rw-r--r--  pkg/domain/entities/container_ps.go | 6
-rw-r--r--  pkg/domain/entities/containers.go | 36
-rw-r--r--  pkg/domain/entities/engine.go | 4
-rw-r--r--  pkg/domain/entities/engine_container.go | 24
-rw-r--r--  pkg/domain/entities/engine_image.go | 14
-rw-r--r--  pkg/domain/entities/filters.go | 24
-rw-r--r--  pkg/domain/entities/images.go | 10
-rw-r--r--  pkg/domain/entities/play.go | 14
-rw-r--r--  pkg/domain/entities/pods.go | 46
-rw-r--r--  pkg/domain/entities/set.go | 16
-rw-r--r--  pkg/domain/entities/types.go | 4
-rw-r--r--  pkg/domain/entities/volumes.go | 8
-rw-r--r--  pkg/domain/infra/abi/containers.go | 91
-rw-r--r--  pkg/domain/infra/abi/generate.go | 161
-rw-r--r--  pkg/domain/infra/abi/healthcheck.go | 4
-rw-r--r--  pkg/domain/infra/abi/images.go | 34
-rw-r--r--  pkg/domain/infra/abi/parse/parse.go | 2
-rw-r--r--  pkg/domain/infra/abi/play.go | 104
-rw-r--r--  pkg/domain/infra/abi/pods.go | 12
-rw-r--r--  pkg/domain/infra/abi/system.go | 14
-rw-r--r--  pkg/domain/infra/abi/trust.go | 8
-rw-r--r--  pkg/domain/infra/abi/volumes.go | 6
-rw-r--r--  pkg/domain/infra/runtime_abi.go | 4
-rw-r--r--  pkg/domain/infra/runtime_tunnel.go | 4
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 46
-rw-r--r--  pkg/domain/infra/tunnel/generate.go | 2
-rw-r--r--  pkg/domain/infra/tunnel/healthcheck.go | 4
-rw-r--r--  pkg/domain/infra/tunnel/helpers.go | 16
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 34
-rw-r--r--  pkg/domain/infra/tunnel/play.go | 2
-rw-r--r--  pkg/domain/infra/tunnel/pods.go | 13
-rw-r--r--  pkg/domain/infra/tunnel/volumes.go | 4
-rw-r--r--  pkg/domain/utils/utils.go | 2
-rw-r--r--  pkg/network/network.go | 2
-rw-r--r--  pkg/parallel/parallel_linux.go | 6
-rw-r--r--  pkg/spec/createconfig.go | 2
-rw-r--r--  pkg/specgen/container_validate.go | 2
-rw-r--r--  pkg/specgen/generate/container_create.go | 2
-rw-r--r--  pkg/specgen/generate/namespaces.go | 4
-rw-r--r--  pkg/specgen/generate/pod_create.go | 4
-rw-r--r--  pkg/specgen/pod_validate.go | 2
-rw-r--r--  pkg/specgen/podspecgen.go | 3
-rw-r--r--  pkg/systemd/generate/common.go | 50
-rw-r--r--  pkg/systemd/generate/common_test.go | 25
-rw-r--r--  pkg/systemd/generate/containers.go | 289
-rw-r--r--  pkg/systemd/generate/containers_test.go | 366
-rw-r--r--  pkg/systemd/generate/pods.go | 341
-rw-r--r--  pkg/systemd/generate/pods_test.go | 100
-rw-r--r--  pkg/systemd/generate/systemdgen.go | 237
-rw-r--r--  pkg/systemd/generate/systemdgen_test.go | 347
-rw-r--r--  pkg/trust/config.go | 4
-rw-r--r--  pkg/varlinkapi/create.go | 4
-rw-r--r--  pkg/varlinkapi/volumes.go | 2
-rw-r--r--  test/dockerpy/README.md | 5
-rw-r--r--  test/dockerpy/__init__.py | 0
-rw-r--r--  test/dockerpy/common.py | 64
-rw-r--r--  test/dockerpy/constant.py | 2
-rw-r--r--  test/dockerpy/containers.py | 46
-rw-r--r--  test/dockerpy/images.py | 40
-rw-r--r--  test/e2e/checkpoint_test.go | 2
-rw-r--r--  test/e2e/cp_test.go | 2
-rw-r--r--  test/e2e/create_test.go | 37
-rw-r--r--  test/e2e/generate_kube_test.go | 8
-rw-r--r--  test/e2e/generate_systemd_test.go | 50
-rw-r--r--  test/e2e/play_kube_test.go | 300
-rw-r--r--  test/e2e/pod_inspect_test.go | 22
-rw-r--r--  test/e2e/pod_rm_test.go | 69
-rw-r--r--  test/e2e/pod_start_test.go | 94
-rw-r--r--  test/e2e/pod_stop_test.go | 69
-rw-r--r--  test/e2e/run_networking_test.go | 26
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 3
-rw-r--r--  vendor/github.com/json-iterator/go/README.md | 36
-rw-r--r--  vendor/github.com/json-iterator/go/any_str.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/config.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/iter_object.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_extension.go | 2
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_map.go | 80
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_optional.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_struct_decoder.go | 22
-rw-r--r--  vendor/github.com/json-iterator/go/stream.go | 5
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go | 9
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go | 17
-rw-r--r--  vendor/github.com/stretchr/testify/assert/http_assertions.go | 21
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go | 17
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go | 17
-rw-r--r--  vendor/k8s.io/api/apps/v1/doc.go | 21
-rw-r--r--  vendor/k8s.io/api/apps/v1/generated.pb.go | 8238
-rw-r--r--  vendor/k8s.io/api/apps/v1/generated.proto | 701
-rw-r--r--  vendor/k8s.io/api/apps/v1/register.go | 60
-rw-r--r--  vendor/k8s.io/api/apps/v1/types.go | 826
-rw-r--r--  vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go | 365
-rw-r--r--  vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go | 772
-rw-r--r--  vendor/modules.txt | 7
202 files changed, 14356 insertions, 1577 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 58c7cd871..6d9ccfc78 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -39,7 +39,7 @@ env:
UBUNTU_NAME: "ubuntu-20"
PRIOR_UBUNTU_NAME: "ubuntu-19"
- _BUILT_IMAGE_SUFFIX: "libpod-6268069335007232" # From the packer output of 'build_vm_images_script'
+ _BUILT_IMAGE_SUFFIX: "libpod-6508632441356288"
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
@@ -72,10 +72,6 @@ env:
GCE_SSH_USERNAME: cirrus-ci
# Name where this repositories cloud resources are located
GCP_PROJECT_ID: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
- RELEASE_GCPJSON: ENCRYPTED[789d8f7e9a5972ce350fd8e60f1032ccbf4a35c3938b604774b711aad280e12c21faf10e25af1e0ba33597ffb9e39e46]
- RELEASE_GCPNAME: ENCRYPTED[417d50488a4bd197bcc925ba6574de5823b97e68db1a17e3a5fde4bcf26576987345e75f8d9ea1c15a156b4612c072a1]
- RELEASE_GCPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
-
# Default VM to use unless set or modified by task
@@ -422,25 +418,20 @@ testing_task:
- name: "test ${PRIOR_FEDORA_NAME}"
gce_instance:
image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
- # TODO:
- # - name: "test ${UBUNTU_NAME}"
- # gce_instance:
- # image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
- # - name: "test ${PRIOR_UBUNTU_NAME}"
- # gce_instance:
- # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
+ - name: "test ${UBUNTU_NAME}"
+ gce_instance:
+ image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+ - name: "test ${PRIOR_UBUNTU_NAME}"
+ gce_instance:
+ image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
timeout_in: 120m
env:
ADD_SECOND_PARTITION: 'true'
matrix:
- - name: remote
- env:
- TEST_REMOTE_CLIENT: 'true'
- - name: local
- env:
- TEST_REMOTE_CLIENT: 'false'
+ - TEST_REMOTE_CLIENT: 'true'
+ - TEST_REMOTE_CLIENT: 'false'
networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh'
setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
@@ -487,12 +478,8 @@ special_testing_rootless_task:
ADD_SECOND_PARTITION: 'true'
SPECIALMODE: 'rootless' # See docs
matrix:
- - name: remote
- env:
- TEST_REMOTE_CLIENT: 'true'
- - name: local
- env:
- TEST_REMOTE_CLIENT: 'false'
+ - TEST_REMOTE_CLIENT: 'true'
+ - TEST_REMOTE_CLIENT: 'false'
timeout_in: 60m
@@ -601,6 +588,7 @@ special_testing_bindings_task:
env:
SPECIALMODE: 'bindings' # See docs
+ ADD_SECOND_PARTITION: 'true' # More root fs space is required
timeout_in: 40m
@@ -694,27 +682,19 @@ verify_test_built_images_task:
env:
ADD_SECOND_PARTITION: 'true'
matrix:
- - name: remote
- env:
- TEST_REMOTE_CLIENT: 'true'
- - name: local
- env:
- TEST_REMOTE_CLIENT: 'false'
+ - TEST_REMOTE_CLIENT: 'true'
+ - TEST_REMOTE_CLIENT: 'false'
matrix:
- # Required env. var. by check_image_script
PACKER_BUILDER_NAME: "${FEDORA_NAME}"
PACKER_BUILDER_NAME: "${PRIOR_FEDORA_NAME}"
+ PACKER_BUILDER_NAME: "${UBUNTU_NAME}"
PACKER_BUILDER_NAME: "${PRIOR_UBUNTU_NAME}"
- # Multiple test failures on ${UBUNTU_CACHE_IMAGE_NAME}
- # PACKER_BUILDER_NAME: "${UBUNTU_NAME}"
networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh'
installed_packages_script: '$SCRIPT_BASE/logcollector.sh packages'
environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
- # Verify expectations once per image
- check_image_script: >-
- [[ "$TEST_REMOTE_CLIENT" == "false" ]] || \
- $SCRIPT_BASE/check_image.sh |& ${TIMESTAMP}
+ # Verify expectations of built images
+ check_image_script: '$SCRIPT_BASE/check_image.sh |& ${TIMESTAMP}'
# Note: A truncated form of normal testing. It only needs to confirm new images
# "probably" work. A full round of testing will happen again after $*_CACHE_IMAGE_NAME
# are updated in this or another PR (w/o '***CIRRUS: TEST IMAGES***').
@@ -732,6 +712,10 @@ docs_task:
depends_on:
- "gating"
+ env:
+ RELEASE_GCPJSON: ENCRYPTED[789d8f7e9a5972ce350fd8e60f1032ccbf4a35c3938b604774b711aad280e12c21faf10e25af1e0ba33597ffb9e39e46]
+ RELEASE_GCPNAME: ENCRYPTED[417d50488a4bd197bcc925ba6574de5823b97e68db1a17e3a5fde4bcf26576987345e75f8d9ea1c15a156b4612c072a1]
+ RELEASE_GCPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
script:
- "$SCRIPT_BASE/build_swagger.sh |& ${TIMESTAMP}"
diff --git a/.golangci.yml b/.golangci.yml
index 5480b02bb..33a8b4f59 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -7,6 +7,7 @@ run:
- contrib
- dependencies
- test
+ - pkg/spec
- pkg/varlink
- pkg/varlinkapi
skip-files:
@@ -21,7 +22,6 @@ linters:
- gochecknoinits
- goconst
- gocyclo
- - golint
- gosec
- lll
- maligned
diff --git a/Makefile b/Makefile
index 1d30d2534..2ac6f426f 100644
--- a/Makefile
+++ b/Makefile
@@ -88,8 +88,8 @@ RELEASE_DIST_VER ?= $(shell hack/get_release_info.sh DIST_VER)
RELEASE_ARCH ?= $(shell hack/get_release_info.sh ARCH)
RELEASE_BASENAME := $(shell hack/get_release_info.sh BASENAME)
-# If non-empty, logs all output from varlink during remote system testing
-VARLINK_LOG ?=
+# If non-empty, logs all output from server during remote system testing
+PODMAN_SERVER_LOG ?=
# If GOPATH not specified, use one in the local directory
ifeq ($(GOPATH),)
@@ -357,22 +357,28 @@ localsystem:
remotesystem:
# Wipe existing config, database, and cache: start with clean slate.
$(RM) -rf ${HOME}/.local/share/containers ${HOME}/.config/containers
- # Start varlink server using tmp socket; loop-wait for it;
+ # Start podman server using tmp socket; loop-wait for it;
# test podman-remote; kill server, clean up tmp socket file.
- # varlink server spews copious unhelpful output; ignore it.
+ # podman server spews copious unhelpful output; ignore it.
+ # FIXME FIXME FIXME: remove 'exit 0' after #6538 and #6539 are fixed
+ exit 0;\
rc=0;\
if timeout -v 1 true; then \
SOCK_FILE=$(shell mktemp --dry-run --tmpdir podman.XXXXXX);\
- export PODMAN_SOCKEY=unix:$$SOCK_FILE; \
- ./bin/podman system service --timeout=0 $$PODMAN_VARLINK_ADDRESS &> $(if $(VARLINK_LOG),$(VARLINK_LOG),/dev/null) & \
+ export PODMAN_SOCKET=unix:$$SOCK_FILE; \
+ ./bin/podman system service --timeout=0 $$PODMAN_SOCKET &> $(if $(PODMAN_SERVER_LOG),$(PODMAN_SERVER_LOG),/dev/null) & \
retry=5;\
while [[ $$retry -ge 0 ]]; do\
echo Waiting for server...;\
sleep 1;\
- ./bin/podman-remote --remote $(SOCK_FILE) info &>/dev/null && break;\
+ ./bin/podman-remote --url $$PODMAN_SOCKET info &>/dev/null && break;\
retry=$$(expr $$retry - 1);\
done;\
- env PODMAN=./bin/podman-remote bats test/system/ ;\
+ if [[ $$retry -lt 0 ]]; then\
+ echo "Error: ./bin/podman system service did not come up on $$SOCK_FILE" >&2;\
+ exit 1;\
+ fi;\
+ env PODMAN="./bin/podman-remote --url $$PODMAN_SOCKET" bats test/system/ ;\
rc=$$?;\
kill %1;\
rm -f $$SOCK_FILE;\
@@ -612,7 +618,7 @@ uninstall:
GIT_CHECK_EXCLUDE="./vendor:docs/make.bat" $(GOBIN)/git-validation -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..$(HEAD)
.PHONY: install.tools
-install.tools: .install.gitvalidation .install.md2man .install.ginkgo .install.golangci-lint ## Install needed tools
+install.tools: .install.gitvalidation .install.md2man .install.ginkgo .install.golangci-lint .install.bats ## Install needed tools
define go-get
env GO111MODULE=off \
@@ -635,6 +641,10 @@ endef
.install.golangci-lint: .gopathok
VERSION=1.18.0 GOBIN=$(GOBIN) sh ./hack/install_golangci.sh
+.PHONY: .install.bats
+.install.bats: .gopathok
+ VERSION=v1.1.0 ./hack/install_bats.sh
+
.PHONY: .install.pre-commit
.install.pre-commit:
if [ -z "$(PRE_COMMIT)" ]; then \
diff --git a/changelog.txt b/changelog.txt
index 609e2de8f..f17e0ee75 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,30 @@
+- Changelog for v2.0.0-rc5 (2020-06-10)
+ * Fix Id->ID where possible for lint
+ * Fixup issues found by golint
+ * podman-events: clarify streaming behaviour
+ * Cirrus: Include packages for containers/conmon CI
+ * Ensure signal validation happens first in pod kill
+ * Bump github.com/json-iterator/go from 1.1.9 to 1.1.10
+ * Bump github.com/containers/common from 0.12.0 to 0.13.0
+ * Improve swagger+CORS metadata docs
+ * Ensure Conmon is alive before waiting for exit file
+ * Bump github.com/stretchr/testify from 1.6.0 to 1.6.1
+ * e2e: disable checkpoint test on Ubuntu
+ * force bats version to v1.1.0
+ * Enable Ubuntu tests in CI
+ * Modify py test to start stop system service for each test
+ * Add parallel operation to `podman stop`
+ * Fix handling of systemd.
+ * Add parallel execution code for container operations
+ * Fix handling of ThrottleWriteIOPSDevice
+ * Bump github.com/seccomp/containers-golang from 0.4.1 to 0.5.0
+ * Strip defaults from namespace flags
+ * Ensure that containers in pods properly set hostname
+ * Adds docker py regression test.
+ * Turn on the podman-commands script to verify man pages
+ * Attempt to turn on special_testing_in_podman tests
+ * Bump to v2.0.0-dev
+
- Changelog for v2.0.0-rc4 (2020-06-04)
* /images/.../json: fix port parsing
* BATS and APIv2: more tests and tweaks
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index 86cd51643..e79c5c20b 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -338,6 +338,11 @@ func GetCreateFlags(cf *ContainerCLIOpts) *pflag.FlagSet {
"pod", "",
"Run container in an existing pod",
)
+ createFlags.StringVar(
+ &cf.PodIDFile,
+ "pod-id-file", "",
+ "Read the pod ID from the file",
+ )
createFlags.BoolVar(
&cf.Privileged,
"privileged", false,
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 4cba5daf7..98dc6744c 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -68,6 +68,7 @@ type ContainerCLIOpts struct {
PID string
PIDsLimit int64
Pod string
+ PodIDFile string
Privileged bool
PublishAll bool
Pull string
diff --git a/cmd/podman/common/default.go b/cmd/podman/common/default.go
index 7233b2091..6e5994b18 100644
--- a/cmd/podman/common/default.go
+++ b/cmd/podman/common/default.go
@@ -16,5 +16,5 @@ var (
// DefaultImageVolume default value
DefaultImageVolume = "bind"
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
)
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 26003b40f..fee9d8c7b 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -254,6 +254,17 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
s.PublishExposedPorts = c.PublishAll
s.Pod = c.Pod
+ if len(c.PodIDFile) > 0 {
+ if len(s.Pod) > 0 {
+ return errors.New("Cannot specify both --pod and --pod-id-file")
+ }
+ podID, err := ReadPodIDFile(c.PodIDFile)
+ if err != nil {
+ return err
+ }
+ s.Pod = podID
+ }
+
expose, err := createExpose(c.Expose)
if err != nil {
return err
@@ -669,7 +680,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
hc.Interval = intervalDuration
if retries < 1 {
- return nil, errors.New("healthcheck-retries must be greater than 0.")
+ return nil, errors.New("healthcheck-retries must be greater than 0")
}
hc.Retries = int(retries)
timeoutDuration, err := time.ParseDuration(timeout)
diff --git a/cmd/podman/common/util.go b/cmd/podman/common/util.go
index a3626b4e4..ce323a4ba 100644
--- a/cmd/podman/common/util.go
+++ b/cmd/podman/common/util.go
@@ -1,6 +1,7 @@
package common
import (
+ "io/ioutil"
"net"
"strconv"
"strings"
@@ -10,6 +11,30 @@ import (
"github.com/sirupsen/logrus"
)
+// ReadPodIDFile reads the specified file and returns its content (i.e., first
+// line).
+func ReadPodIDFile(path string) (string, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", errors.Wrap(err, "error reading pod ID file")
+ }
+ return strings.Split(string(content), "\n")[0], nil
+}
+
+// ReadPodIDFiles reads the specified files and returns their content (i.e.,
+// first line).
+func ReadPodIDFiles(files []string) ([]string, error) {
+ ids := []string{}
+ for _, file := range files {
+ id, err := ReadPodIDFile(file)
+ if err != nil {
+ return nil, err
+ }
+ ids = append(ids, id)
+ }
+ return ids, nil
+}
+
// createExpose parses user-provided exposed port definitions and converts them
// into SpecGen format.
// TODO: The SpecGen format should really handle ranges more sanely - we could
@@ -71,14 +96,44 @@ func createPortBindings(ports []string) ([]specgen.PortMapping, error) {
return nil, errors.Errorf("invalid port format - protocol can only be specified once")
}
- splitPort := strings.Split(splitProto[0], ":")
+ remainder := splitProto[0]
+ haveV6 := false
+
+ // Check for an IPv6 address in brackets
+ splitV6 := strings.Split(remainder, "]")
+ switch len(splitV6) {
+ case 1:
+ // Do nothing, proceed as before
+ case 2:
+ // We potentially have an IPv6 address
+ haveV6 = true
+ if !strings.HasPrefix(splitV6[0], "[") {
+ return nil, errors.Errorf("invalid port format - IPv6 addresses must be enclosed by []")
+ }
+ if !strings.HasPrefix(splitV6[1], ":") {
+ return nil, errors.Errorf("invalid port format - IPv6 address must be followed by a colon (':')")
+ }
+ ipNoPrefix := strings.TrimPrefix(splitV6[0], "[")
+ hostIP = &ipNoPrefix
+ remainder = strings.TrimPrefix(splitV6[1], ":")
+ default:
+ return nil, errors.Errorf("invalid port format - at most one IPv6 address can be specified in a --publish")
+ }
+
+ splitPort := strings.Split(remainder, ":")
switch len(splitPort) {
case 1:
+ if haveV6 {
+ return nil, errors.Errorf("invalid port format - must provide host and destination port if specifying an IP")
+ }
ctrPort = splitPort[0]
case 2:
hostPort = &(splitPort[0])
ctrPort = splitPort[1]
case 3:
+ if haveV6 {
+ return nil, errors.Errorf("invalid port format - when v6 address specified, must be [ipv6]:hostPort:ctrPort")
+ }
hostIP = &(splitPort[0])
hostPort = &(splitPort[1])
ctrPort = splitPort[2]
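Usage note for the IPv6 handling above: an IPv6 host address in --publish must be enclosed in brackets and followed by both a host and a container port, i.e. [ipv6]:hostPort:ctrPort. A sketch of the accepted form (address, ports, and image are illustrative only):

    $ podman run -d -p "[::1]:8080:80" nginx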
diff --git a/cmd/podman/containers/attach.go b/cmd/podman/containers/attach.go
index 9f29d1664..9ef9d79f0 100644
--- a/cmd/podman/containers/attach.go
+++ b/cmd/podman/containers/attach.go
@@ -18,7 +18,7 @@ var (
Short: "Attach to a running container",
Long: attachDescription,
RunE: attach,
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Example: `podman attach ctrID
podman attach 1234
podman attach --no-stdin foobar`,
@@ -29,7 +29,7 @@ var (
Short: attachCommand.Short,
Long: attachCommand.Long,
RunE: attachCommand.RunE,
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Example: `podman container attach ctrID
podman container attach 1234
podman container attach --no-stdin foobar`,
diff --git a/cmd/podman/containers/container.go b/cmd/podman/containers/container.go
index a102318fb..3ff341dcd 100644
--- a/cmd/podman/containers/container.go
+++ b/cmd/podman/containers/container.go
@@ -10,7 +10,7 @@ import (
var (
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
// Command: podman _container_
containerCmd = &cobra.Command{
diff --git a/cmd/podman/containers/diff.go b/cmd/podman/containers/diff.go
index 59b788010..33b1c1126 100644
--- a/cmd/podman/containers/diff.go
+++ b/cmd/podman/containers/diff.go
@@ -13,7 +13,7 @@ var (
// podman container _diff_
diffCmd = &cobra.Command{
Use: "diff [flags] CONTAINER",
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Short: "Inspect changes on container's file systems",
Long: `Displays changes on a container filesystem. The container will be compared to its parent layer.`,
RunE: diff,
diff --git a/cmd/podman/containers/exec.go b/cmd/podman/containers/exec.go
index 41f100768..ce48af618 100644
--- a/cmd/podman/containers/exec.go
+++ b/cmd/podman/containers/exec.go
@@ -84,7 +84,7 @@ func init() {
}
func exec(cmd *cobra.Command, args []string) error {
- var nameOrId string
+ var nameOrID string
if len(args) == 0 && !execOpts.Latest {
return errors.New("exec requires the name or ID of a container or the --latest flag")
@@ -92,7 +92,7 @@ func exec(cmd *cobra.Command, args []string) error {
execOpts.Cmd = args
if !execOpts.Latest {
execOpts.Cmd = args[1:]
- nameOrId = args[0]
+ nameOrID = args[0]
}
// Validate given environment variables
execOpts.Envs = make(map[string]string)
@@ -122,12 +122,12 @@ func exec(cmd *cobra.Command, args []string) error {
streams.AttachOutput = true
streams.AttachError = true
- exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrId, execOpts, streams)
+ exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrID, execOpts, streams)
registry.SetExitCode(exitCode)
return err
}
- id, err := registry.ContainerEngine().ContainerExecDetached(registry.GetContext(), nameOrId, execOpts)
+ id, err := registry.ContainerEngine().ContainerExecDetached(registry.GetContext(), nameOrID, execOpts)
if err != nil {
return err
}
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index 4d12d2534..a29b4da3d 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -67,7 +67,7 @@ func listFlagSet(flags *pflag.FlagSet) {
flags.BoolVar(&listOpts.Sync, "sync", false, "Sync container state with OCI runtime")
flags.UintVarP(&listOpts.Watch, "watch", "w", 0, "Watch the ps output on an interval in seconds")
- sort := validate.ChoiceValue(&listOpts.Sort, "command", "created", "id", "image", "names", "runningfor", "size", "status")
+ sort := validate.Value(&listOpts.Sort, "command", "created", "id", "image", "names", "runningfor", "size", "status")
flags.Var(sort, "sort", "Sort output by: "+sort.Choices())
if registry.IsRemote() {
diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go
index c61b161e4..11aa3a4d2 100644
--- a/cmd/podman/containers/stats.go
+++ b/cmd/podman/containers/stats.go
@@ -87,13 +87,13 @@ func init() {
func checkStatOptions(cmd *cobra.Command, args []string) error {
opts := 0
if statsOptions.All {
- opts += 1
+ opts++
}
if statsOptions.Latest {
- opts += 1
+ opts++
}
if len(args) > 0 {
- opts += 1
+ opts++
}
if opts > 1 {
return errors.Errorf("--all, --latest and containers cannot be used together")
@@ -219,9 +219,9 @@ func combineHumanValues(a, b uint64) string {
func outputJSON(stats []*containerStats) error {
type jstat struct {
- Id string `json:"id"`
+ Id string `json:"id"` //nolint
Name string `json:"name"`
- CpuPercent string `json:"cpu_percent"`
+ CpuPercent string `json:"cpu_percent"` //nolint
MemUsage string `json:"mem_usage"`
MemPerc string `json:"mem_percent"`
NetIO string `json:"net_io"`
diff --git a/cmd/podman/containers/stop.go b/cmd/podman/containers/stop.go
index 22c487961..0f2a91af0 100644
--- a/cmd/podman/containers/stop.go
+++ b/cmd/podman/containers/stop.go
@@ -85,9 +85,8 @@ func stop(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
- stopOptions.Timeout = containerConfig.Engine.StopTimeout
if cmd.Flag("time").Changed {
- stopOptions.Timeout = stopTimeout
+ stopOptions.Timeout = &stopTimeout
}
responses, err := registry.ContainerEngine().ContainerStop(context.Background(), args, stopOptions)
diff --git a/cmd/podman/containers/wait.go b/cmd/podman/containers/wait.go
index ca3883091..115bb3eea 100644
--- a/cmd/podman/containers/wait.go
+++ b/cmd/podman/containers/wait.go
@@ -23,7 +23,7 @@ var (
Short: "Block on one or more containers",
Long: waitDescription,
RunE: wait,
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Example: `podman wait --interval 5000 ctrID
podman wait ctrID1 ctrID2`,
}
@@ -33,7 +33,7 @@ var (
Short: waitCommand.Short,
Long: waitCommand.Long,
RunE: waitCommand.RunE,
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Example: `podman container wait --interval 5000 ctrID
podman container wait ctrID1 ctrID2`,
}
diff --git a/cmd/podman/diff.go b/cmd/podman/diff.go
index 1ff2fce40..d635ea57a 100644
--- a/cmd/podman/diff.go
+++ b/cmd/podman/diff.go
@@ -18,7 +18,7 @@ var (
diffDescription = `Displays changes on a container or image's filesystem. The container or image will be compared to its parent layer.`
diffCmd = &cobra.Command{
Use: "diff [flags] {CONTAINER_ID | IMAGE_ID}",
- Args: validate.IdOrLatestArgs,
+ Args: validate.IDOrLatestArgs,
Short: "Display the changes of object's file system",
Long: diffDescription,
TraverseChildren: true,
diff --git a/cmd/podman/images/image.go b/cmd/podman/images/image.go
index 790c16c05..ebef126c0 100644
--- a/cmd/podman/images/image.go
+++ b/cmd/podman/images/image.go
@@ -9,7 +9,7 @@ import (
var (
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
// Command: podman _image_
imageCmd = &cobra.Command{
diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go
index 23757104b..236ae15b4 100644
--- a/cmd/podman/images/list.go
+++ b/cmd/podman/images/list.go
@@ -100,7 +100,7 @@ func images(cmd *cobra.Command, args []string) error {
switch {
case listFlag.quiet:
- return writeId(summaries)
+ return writeID(summaries)
case cmd.Flag("format").Changed && listFlag.format == "json":
return writeJSON(summaries)
default:
@@ -108,7 +108,7 @@ func images(cmd *cobra.Command, args []string) error {
}
}
-func writeId(imageS []*entities.ImageSummary) error {
+func writeID(imageS []*entities.ImageSummary) error {
var ids = map[string]struct{}{}
for _, e := range imageS {
i := "sha256:" + e.ID
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index 24604c055..498a4dc18 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -33,8 +33,8 @@ var (
var (
networkListOptions entities.NetworkListOptions
- headers string = "NAME\tVERSION\tPLUGINS\n"
- defaultListRow string = "{{.Name}}\t{{.Version}}\t{{.Plugins}}\n"
+ headers = "NAME\tVERSION\tPLUGINS\n"
+ defaultListRow = "{{.Name}}\t{{.Version}}\t{{.Plugins}}\n"
)
func networkListFlags(flags *pflag.FlagSet) {
@@ -57,7 +57,7 @@ func init() {
func networkList(cmd *cobra.Command, args []string) error {
var (
- nlprs []NetworkListPrintReports
+ nlprs []ListPrintReports
)
// validate the filter pattern.
@@ -83,7 +83,7 @@ func networkList(cmd *cobra.Command, args []string) error {
}
for _, r := range responses {
- nlprs = append(nlprs, NetworkListPrintReports{r})
+ nlprs = append(nlprs, ListPrintReports{r})
}
row := networkListOptions.Format
@@ -125,14 +125,14 @@ func jsonOut(responses []*entities.NetworkListReport) error {
return nil
}
-type NetworkListPrintReports struct {
+type ListPrintReports struct {
*entities.NetworkListReport
}
-func (n NetworkListPrintReports) Version() string {
+func (n ListPrintReports) Version() string {
return n.CNIVersion
}
-func (n NetworkListPrintReports) Plugins() string {
+func (n ListPrintReports) Plugins() string {
return network.GetCNIPlugins(n.NetworkConfigList)
}
diff --git a/cmd/podman/parse/common.go b/cmd/podman/parse/common.go
index 13f425b6d..b3aa88da2 100644
--- a/cmd/podman/parse/common.go
+++ b/cmd/podman/parse/common.go
@@ -5,6 +5,10 @@ import (
"github.com/spf13/cobra"
)
+// TODO: the two functions here are almost identical. It may be worth looking
+// into generalizing the two a bit more and share code but time is scarce and
+// we only live once.
+
// CheckAllLatestAndCIDFile checks that --all and --latest are used correctly.
// If cidfile is set, also check for the --cidfile flag.
func CheckAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool, cidfile bool) error {
@@ -55,3 +59,54 @@ func CheckAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool
}
return nil
}
+
+// CheckAllLatestAndPodIDFile checks that --all and --latest are used correctly.
+// If withIDFile is set, also check for the --pod-id-file flag.
+func CheckAllLatestAndPodIDFile(c *cobra.Command, args []string, ignoreArgLen bool, withIDFile bool) error {
+ argLen := len(args)
+ if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
+ if !withIDFile {
+ return errors.New("unable to lookup values for 'latest' or 'all'")
+ } else if c.Flags().Lookup("pod-id-file") == nil {
+ return errors.New("unable to lookup values for 'latest', 'all' or 'pod-id-file'")
+ }
+ }
+
+ specifiedAll, _ := c.Flags().GetBool("all")
+ specifiedLatest, _ := c.Flags().GetBool("latest")
+ specifiedPodIDFile := false
+ if pid, _ := c.Flags().GetStringArray("pod-id-file"); len(pid) > 0 {
+ specifiedPodIDFile = true
+ }
+
+ if specifiedPodIDFile && (specifiedAll || specifiedLatest) {
+ return errors.Errorf("--all, --latest and --pod-id-file cannot be used together")
+ } else if specifiedAll && specifiedLatest {
+ return errors.Errorf("--all and --latest cannot be used together")
+ }
+
+ if (argLen > 0) && specifiedAll {
+ return errors.Errorf("no arguments are needed with --all")
+ }
+
+ if ignoreArgLen {
+ return nil
+ }
+
+ if argLen > 0 {
+ if specifiedLatest {
+ return errors.Errorf("no arguments are needed with --latest")
+ } else if withIDFile && (specifiedLatest || specifiedPodIDFile) {
+ return errors.Errorf("no arguments are needed with --latest or --pod-id-file")
+ }
+ }
+
+ if specifiedPodIDFile {
+ return nil
+ }
+
+ if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedPodIDFile {
+ return errors.Errorf("you must provide at least one name or id")
+ }
+ return nil
+}
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 1fbf24d5e..c26ca9853 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -92,21 +92,29 @@ func kube(cmd *cobra.Command, args []string) error {
return err
}
- for _, l := range report.Logs {
- fmt.Fprintf(os.Stderr, l)
+ for _, pod := range report.Pods {
+ for _, l := range pod.Logs {
+ fmt.Fprintf(os.Stderr, l)
+ }
}
- fmt.Printf("Pod:\n%s\n", report.Pod)
- switch len(report.Containers) {
- case 0:
- return nil
- case 1:
- fmt.Printf("Container:\n")
- default:
- fmt.Printf("Containers:\n")
- }
- for _, ctr := range report.Containers {
- fmt.Println(ctr)
+ for _, pod := range report.Pods {
+ fmt.Printf("Pod:\n")
+ fmt.Println(pod.ID)
+
+ switch len(pod.Containers) {
+ case 0:
+ continue
+ case 1:
+ fmt.Printf("Container:\n")
+ default:
+ fmt.Printf("Containers:\n")
+ }
+ for _, ctr := range pod.Containers {
+ fmt.Println(ctr)
+ }
+ // Empty line for space for next block
+ fmt.Println()
}
return nil
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 62b5b849e..51b7a7d52 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -53,6 +53,7 @@ func init() {
flags.AddFlagSet(common.GetNetFlags())
flags.StringVar(&createOptions.CGroupParent, "cgroup-parent", "", "Set parent cgroup for the pod")
flags.BoolVar(&createOptions.Infra, "infra", true, "Create an infra container associated with the pod to share namespaces with")
+ flags.StringVar(&createOptions.InfraConmonPidFile, "infra-conmon-pidfile", "", "Path to the file that will receive the POD of the infra container's conmon")
flags.StringVar(&createOptions.InfraImage, "infra-image", containerConfig.Engine.InfraImage, "The image of the infra container to associate with the pod")
flags.StringVar(&createOptions.InfraCommand, "infra-command", containerConfig.Engine.InfraCommand, "The command to run on the infra container when the pod is started")
flags.StringSliceVar(&labelFile, "label-file", []string{}, "Read in a line delimited file of labels")
@@ -73,8 +74,8 @@ func aliasNetworkFlag(_ *pflag.FlagSet, name string) pflag.NormalizedName {
func create(cmd *cobra.Command, args []string) error {
var (
- err error
- podIdFile *os.File
+ err error
+ podIDFD *os.File
)
createOptions.Labels, err = parse.GetAllLabels(labelFile, labels)
if err != nil {
@@ -83,6 +84,9 @@ func create(cmd *cobra.Command, args []string) error {
if !createOptions.Infra {
logrus.Debugf("Not creating an infra container")
+ if cmd.Flag("infra-conmon-pidfile").Changed {
+ return errors.New("cannot set infra-conmon-pid without an infra container")
+ }
if cmd.Flag("infra-command").Changed {
return errors.New("cannot set infra-command without an infra container")
}
@@ -101,15 +105,15 @@ func create(cmd *cobra.Command, args []string) error {
}
if cmd.Flag("pod-id-file").Changed {
- podIdFile, err = util.OpenExclusiveFile(podIDFile)
+ podIDFD, err = util.OpenExclusiveFile(podIDFile)
if err != nil && os.IsExist(err) {
return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", podIDFile)
}
if err != nil {
return errors.Errorf("error opening pod-id-file %s", podIDFile)
}
- defer errorhandling.CloseQuiet(podIdFile)
- defer errorhandling.SyncQuiet(podIdFile)
+ defer errorhandling.CloseQuiet(podIDFD)
+ defer errorhandling.SyncQuiet(podIDFD)
}
createOptions.Net, err = common.NetFlagsToNetOptions(cmd)
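Usage note for the new --infra-conmon-pidfile flag above: it records the PID of the infra container's conmon process, which is useful for systemd units that want to supervise the pod. A sketch (pod name and path are illustrative only):

    $ podman pod create --name mypod --infra-conmon-pidfile /tmp/mypod-conmon.pid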
diff --git a/cmd/podman/pods/pod.go b/cmd/podman/pods/pod.go
index ed265ef90..9dc538c71 100644
--- a/cmd/podman/pods/pod.go
+++ b/cmd/podman/pods/pod.go
@@ -10,7 +10,7 @@ import (
var (
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
// Command: podman _pod_
podCmd = &cobra.Command{
diff --git a/cmd/podman/pods/ps.go b/cmd/podman/pods/ps.go
index 1385ff270..bcd1db84c 100644
--- a/cmd/podman/pods/ps.go
+++ b/cmd/podman/pods/ps.go
@@ -195,7 +195,7 @@ func (l ListPodReporter) ID() string {
}
// Id returns the Pod id
-func (l ListPodReporter) Id() string {
+func (l ListPodReporter) Id() string { //nolint
if noTrunc {
return l.ListPodsReport.Id
}
@@ -209,7 +209,7 @@ func (l ListPodReporter) InfraID() string {
// InfraId returns the infra container id for the pod
// depending on trunc
-func (l ListPodReporter) InfraId() string {
+func (l ListPodReporter) InfraId() string { //nolint
if len(l.ListPodsReport.InfraId) == 0 {
return ""
}
@@ -252,7 +252,7 @@ func sortPodPsOutput(sortBy string, lprs []*entities.ListPodsReport) error {
case "created":
sort.Sort(podPsSortedCreated{lprs})
case "id":
- sort.Sort(podPsSortedId{lprs})
+ sort.Sort(podPsSortedID{lprs})
case "name":
sort.Sort(podPsSortedName{lprs})
case "number":
@@ -276,9 +276,9 @@ func (a podPsSortedCreated) Less(i, j int) bool {
return a.lprSort[i].Created.After(a.lprSort[j].Created)
}
-type podPsSortedId struct{ lprSort }
+type podPsSortedID struct{ lprSort }
-func (a podPsSortedId) Less(i, j int) bool { return a.lprSort[i].Id < a.lprSort[j].Id }
+func (a podPsSortedID) Less(i, j int) bool { return a.lprSort[i].Id < a.lprSort[j].Id }
type podPsSortedNumber struct{ lprSort }
diff --git a/cmd/podman/pods/rm.go b/cmd/podman/pods/rm.go
index 4b9882f8a..8de0bce9e 100644
--- a/cmd/podman/pods/rm.go
+++ b/cmd/podman/pods/rm.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,7 +12,15 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podRmOptionsWrapper struct {
+ entities.PodRmOptions
+
+ PodIDFiles []string
+}
+
var (
+ rmOptions = podRmOptionsWrapper{}
podRmDescription = fmt.Sprintf(`podman rm will remove one or more stopped pods and their containers from the host.
The pod name or ID can be used. A pod with containers will not be removed without --force. If --force is specified, all containers will be stopped, then removed.`)
@@ -21,7 +30,7 @@ var (
Long: podRmDescription,
RunE: rm,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod rm mywebserverpod
podman pod rm -f 860a4b23
@@ -29,10 +38,6 @@ var (
}
)
-var (
- rmOptions = entities.PodRmOptions{}
-)
-
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
@@ -45,6 +50,7 @@ func init() {
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Force removal of a running pod by first stopping all containers, then removing all containers in the pod. The default is false")
flags.BoolVarP(&rmOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&rmOptions.Latest, "latest", "l", false, "Remove the latest pod podman is aware of")
+ flags.StringArrayVarP(&rmOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
_ = flags.MarkHidden("ignore")
@@ -55,7 +61,14 @@ func rm(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
- responses, err := registry.ContainerEngine().PodRm(context.Background(), args, rmOptions)
+
+ ids, err := common.ReadPodIDFiles(rmOptions.PodIDFiles)
+ if err != nil {
+ return err
+ }
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodRm(context.Background(), args, rmOptions.PodRmOptions)
if err != nil {
return err
}
diff --git a/cmd/podman/pods/start.go b/cmd/podman/pods/start.go
index d0150a3c2..97020b360 100644
--- a/cmd/podman/pods/start.go
+++ b/cmd/podman/pods/start.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,6 +12,13 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podStartOptionsWrapper struct {
+ entities.PodStartOptions
+
+ PodIDFiles []string
+}
+
var (
podStartDescription = `The pod name or ID can be used.
@@ -21,7 +29,7 @@ var (
Long: podStartDescription,
RunE: start,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod start podID
podman pod start --latest
@@ -30,7 +38,7 @@ var (
)
var (
- startOptions = entities.PodStartOptions{}
+ startOptions = podStartOptionsWrapper{}
)
func init() {
@@ -43,6 +51,7 @@ func init() {
flags := startCommand.Flags()
flags.BoolVarP(&startOptions.All, "all", "a", false, "Restart all running pods")
flags.BoolVarP(&startOptions.Latest, "latest", "l", false, "Restart the latest pod podman is aware of")
+ flags.StringArrayVarP(&startOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
}
@@ -52,7 +61,14 @@ func start(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
- responses, err := registry.ContainerEngine().PodStart(context.Background(), args, startOptions)
+
+ ids, err := common.ReadPodIDFiles(startOptions.PodIDFiles)
+ if err != nil {
+ return err
+ }
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodStart(context.Background(), args, startOptions.PodStartOptions)
if err != nil {
return err
}
diff --git a/cmd/podman/pods/stats.go b/cmd/podman/pods/stats.go
index d3950fdbc..d14632f01 100644
--- a/cmd/podman/pods/stats.go
+++ b/cmd/podman/pods/stats.go
@@ -71,7 +71,7 @@ func stats(cmd *cobra.Command, args []string) error {
}
format := statsOptions.Format
- doJson := strings.ToLower(format) == formats.JSONString
+ doJSON := strings.ToLower(format) == formats.JSONString
header := getPodStatsHeader(format)
for {
@@ -80,7 +80,7 @@ func stats(cmd *cobra.Command, args []string) error {
return err
}
// Print the stats in the requested format and configuration.
- if doJson {
+ if doJSON {
if err := printJSONPodStats(reports); err != nil {
return err
}
diff --git a/cmd/podman/pods/stop.go b/cmd/podman/pods/stop.go
index daf05d640..628e8a536 100644
--- a/cmd/podman/pods/stop.go
+++ b/cmd/podman/pods/stop.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,7 +12,18 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podStopOptionsWrapper struct {
+ entities.PodStopOptions
+
+ PodIDFiles []string
+ TimeoutCLI uint
+}
+
var (
+ stopOptions = podStopOptionsWrapper{
+ PodStopOptions: entities.PodStopOptions{Timeout: -1},
+ }
podStopDescription = `The pod name or ID can be used.
This command will stop all running containers in each of the specified pods.`
@@ -22,7 +34,7 @@ var (
Long: podStopDescription,
RunE: stop,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod stop mywebserverpod
podman pod stop --latest
@@ -30,13 +42,6 @@ var (
}
)
-var (
- stopOptions = entities.PodStopOptions{
- Timeout: -1,
- }
- timeout uint
-)
-
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
@@ -47,7 +52,8 @@ func init() {
flags.BoolVarP(&stopOptions.All, "all", "a", false, "Stop all running pods")
flags.BoolVarP(&stopOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&stopOptions.Latest, "latest", "l", false, "Stop the latest pod podman is aware of")
- flags.UintVarP(&timeout, "time", "t", containerConfig.Engine.StopTimeout, "Seconds to wait for pod stop before killing the container")
+ flags.UintVarP(&stopOptions.TimeoutCLI, "time", "t", containerConfig.Engine.StopTimeout, "Seconds to wait for pod stop before killing the container")
+ flags.StringArrayVarP(&stopOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
_ = flags.MarkHidden("ignore")
@@ -60,9 +66,16 @@ func stop(cmd *cobra.Command, args []string) error {
errs utils.OutputErrors
)
if cmd.Flag("time").Changed {
- stopOptions.Timeout = int(timeout)
+ stopOptions.Timeout = int(stopOptions.TimeoutCLI)
+ }
+
+ ids, err := common.ReadPodIDFiles(stopOptions.PodIDFiles)
+ if err != nil {
+ return err
}
- responses, err := registry.ContainerEngine().PodStop(context.Background(), args, stopOptions)
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodStop(context.Background(), args, stopOptions.PodStopOptions)
if err != nil {
return err
}
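Usage note: with the changes to pod rm, start, and stop above, a pod created with an ID file can be managed entirely through --pod-id-file, without knowing its name or ID. A hedged lifecycle sketch (path is illustrative only):

    $ podman pod create --pod-id-file /tmp/mypod.id
    $ podman pod stop --pod-id-file /tmp/mypod.id
    $ podman pod rm --pod-id-file /tmp/mypod.id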
diff --git a/cmd/podman/registry/json.go b/cmd/podman/registry/json.go
index f25406c3c..a8a1623f5 100644
--- a/cmd/podman/registry/json.go
+++ b/cmd/podman/registry/json.go
@@ -11,8 +11,8 @@ var (
jsonSync sync.Once
)
-// JsonLibrary provides a "encoding/json" compatible API
-func JsonLibrary() jsoniter.API {
+// JSONLibrary provides a "encoding/json" compatible API
+func JSONLibrary() jsoniter.API {
jsonSync.Do(func() {
json = jsoniter.ConfigCompatibleWithStandardLibrary
})
diff --git a/cmd/podman/report/report.go b/cmd/podman/report/report.go
index 8392f10e0..ce349ef35 100644
--- a/cmd/podman/report/report.go
+++ b/cmd/podman/report/report.go
@@ -3,4 +3,4 @@ package report
import "github.com/containers/libpod/cmd/podman/registry"
// Pull in configured json library
-var json = registry.JsonLibrary()
+var json = registry.JSONLibrary()
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index b62ee144a..4f834e87d 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -119,10 +119,10 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
}
if cmd.Flag("cpu-profile").Changed {
- f, err := os.Create(cfg.CpuProfile)
+ f, err := os.Create(cfg.CPUProfile)
if err != nil {
return errors.Wrapf(err, "unable to create cpu profiling file %s",
- cfg.CpuProfile)
+ cfg.CPUProfile)
}
if err := pprof.StartCPUProfile(f); err != nil {
return err
@@ -212,13 +212,13 @@ func rootFlags(opts *entities.PodmanConfig, flags *pflag.FlagSet) {
// V2 flags
flags.BoolVarP(&opts.Remote, "remote", "r", false, "Access remote Podman service (default false)")
// TODO Read uri from containers.config when available
- flags.StringVar(&opts.Uri, "url", registry.DefaultAPIAddress(), "URL to access Podman service (CONTAINER_HOST)")
+ flags.StringVar(&opts.URI, "url", registry.DefaultAPIAddress(), "URL to access Podman service (CONTAINER_HOST)")
flags.StringSliceVar(&opts.Identities, "identity", []string{}, "path to SSH identity file, (CONTAINER_SSHKEY)")
flags.StringVar(&opts.PassPhrase, "passphrase", "", "passphrase for identity file (not secure, CONTAINER_PASSPHRASE), ssh-agent always supported")
cfg := opts.Config
flags.StringVar(&cfg.Engine.CgroupManager, "cgroup-manager", cfg.Engine.CgroupManager, "Cgroup manager to use (\"cgroupfs\"|\"systemd\")")
- flags.StringVar(&opts.CpuProfile, "cpu-profile", "", "Path for the cpu profiling results")
+ flags.StringVar(&opts.CPUProfile, "cpu-profile", "", "Path for the cpu profiling results")
flags.StringVar(&opts.ConmonPath, "conmon", "", "Path of the conmon binary")
flags.StringVar(&cfg.Engine.NetworkCmdPath, "network-cmd-path", cfg.Engine.NetworkCmdPath, "Path to the command for configuring the network")
flags.StringVar(&cfg.Network.NetworkConfigDir, "cni-config-dir", cfg.Network.NetworkConfigDir, "Path of the configuration directory for CNI networks")
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 8fe035209..9318bba12 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -63,7 +63,7 @@ func printSummary(reports *entities.SystemDfReport, userFormat string) error {
dfSummaries []*dfSummary
active int
size, reclaimable int64
- format string = "{{.Type}}\t{{.Total}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}\n"
+ format = "{{.Type}}\t{{.Total}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}\n"
w io.Writer = os.Stdout
)
@@ -74,7 +74,7 @@ func printSummary(reports *entities.SystemDfReport, userFormat string) error {
for _, i := range reports.Images {
if i.Containers > 0 {
- active += 1
+ active++
}
size += i.Size
if i.Containers < 1 {
@@ -99,7 +99,7 @@ func printSummary(reports *entities.SystemDfReport, userFormat string) error {
)
for _, c := range reports.Containers {
if c.Status == "running" {
- conActive += 1
+ conActive++
} else {
conReclaimable += c.RWSize
}
diff --git a/cmd/podman/system/events.go b/cmd/podman/system/events.go
index 27e80138e..c401c5a92 100644
--- a/cmd/podman/system/events.go
+++ b/cmd/podman/system/events.go
@@ -17,8 +17,10 @@ import (
)
var (
- eventsDescription = "Monitor podman events"
- eventsCommand = &cobra.Command{
+ eventsDescription = `Monitor podman events.
+
+ By default, streaming mode is used, printing new events as they occur. Previous events can be listed via --since and --until.`
+ eventsCommand = &cobra.Command{
Use: "events",
Args: validate.NoArgs,
Short: "Show podman events",
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index 1b07ee301..ecd17c251 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -64,7 +64,7 @@ func aliasTimeoutFlag(_ *pflag.FlagSet, name string) pflag.NormalizedName {
}
func service(cmd *cobra.Command, args []string) error {
- apiURI, err := resolveApiURI(args)
+ apiURI, err := resolveAPIURI(args)
if err != nil {
return err
}
@@ -103,7 +103,7 @@ func service(cmd *cobra.Command, args []string) error {
return restService(opts, cmd.Flags(), registry.PodmanConfig())
}
-func resolveApiURI(_url []string) (string, error) {
+func resolveAPIURI(_url []string) (string, error) {
// When determining _*THE*_ listening endpoint --
// 1) User input wins always
// 2) systemd socket activation
diff --git a/cmd/podman/system/system.go b/cmd/podman/system/system.go
index d9691ad2a..acf41a32d 100644
--- a/cmd/podman/system/system.go
+++ b/cmd/podman/system/system.go
@@ -9,7 +9,7 @@ import (
var (
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
// Command: podman _system_
systemCmd = &cobra.Command{
diff --git a/cmd/podman/validate/args.go b/cmd/podman/validate/args.go
index 14b4d7897..69240798f 100644
--- a/cmd/podman/validate/args.go
+++ b/cmd/podman/validate/args.go
@@ -23,8 +23,8 @@ func SubCommandExists(cmd *cobra.Command, args []string) error {
return errors.Errorf("missing command '%[1]s COMMAND'\nTry '%[1]s --help' for more information.", cmd.CommandPath())
}
-// IdOrLatestArgs used to validate a nameOrId was provided or the "--latest" flag
-func IdOrLatestArgs(cmd *cobra.Command, args []string) error {
+// IDOrLatestArgs used to validate a nameOrId was provided or the "--latest" flag
+func IDOrLatestArgs(cmd *cobra.Command, args []string) error {
if len(args) > 1 || (len(args) == 0 && !cmd.Flag("latest").Changed) {
return fmt.Errorf("`%s` requires a name, id or the \"--latest\" flag", cmd.CommandPath())
}
diff --git a/cmd/podman/validate/choice.go b/cmd/podman/validate/choice.go
index 572c5f4a5..8bb21c591 100644
--- a/cmd/podman/validate/choice.go
+++ b/cmd/podman/validate/choice.go
@@ -6,28 +6,28 @@ import (
)
// Honors cobra.Value interface
-type choiceValue struct {
+type ChoiceValue struct {
value *string
choices []string
}
-// ChoiceValue may be used in cobra FlagSet methods Var/VarP/VarPF() to select from a set of values
+// Value may be used in cobra FlagSet methods Var/VarP/VarPF() to select from a set of values
//
// Example:
// created := validate.ChoiceValue(&opts.Sort, "command", "created", "id", "image", "names", "runningfor", "size", "status")
// flags.Var(created, "sort", "Sort output by: "+created.Choices())
-func ChoiceValue(p *string, choices ...string) *choiceValue {
- return &choiceValue{
+func Value(p *string, choices ...string) *ChoiceValue {
+ return &ChoiceValue{
value: p,
choices: choices,
}
}
-func (c *choiceValue) String() string {
+func (c *ChoiceValue) String() string {
return *c.value
}
-func (c *choiceValue) Set(value string) error {
+func (c *ChoiceValue) Set(value string) error {
for _, v := range c.choices {
if v == value {
*c.value = value
@@ -37,10 +37,10 @@ func (c *choiceValue) Set(value string) error {
return fmt.Errorf("%q is not a valid value. Choose from: %q", value, c.Choices())
}
-func (c *choiceValue) Choices() string {
+func (c *ChoiceValue) Choices() string {
return strings.Join(c.choices, ", ")
}
-func (c *choiceValue) Type() string {
+func (c *ChoiceValue) Type() string {
return "choice"
}
diff --git a/cmd/podman/volumes/create.go b/cmd/podman/volumes/create.go
index 1bec8d0e7..16ac3771e 100644
--- a/cmd/podman/volumes/create.go
+++ b/cmd/podman/volumes/create.go
@@ -67,6 +67,6 @@ func create(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- fmt.Println(response.IdOrName)
+ fmt.Println(response.IDOrName)
return nil
}
diff --git a/cmd/podman/volumes/volume.go b/cmd/podman/volumes/volume.go
index 3e90d178c..12947a6b1 100644
--- a/cmd/podman/volumes/volume.go
+++ b/cmd/podman/volumes/volume.go
@@ -9,7 +9,7 @@ import (
var (
// Pull in configured json library
- json = registry.JsonLibrary()
+ json = registry.JSONLibrary()
// Command: podman _volume_
volumeCmd = &cobra.Command{
diff --git a/completions/bash/podman b/completions/bash/podman
index a58becaf0..6dbe645fe 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2102,6 +2102,7 @@ _podman_container_run() {
--pid
--pids-limit
--pod
+ --pod-id-file
--publish -p
--pull
--runtime
@@ -2206,7 +2207,7 @@ _podman_container_run() {
__podman_complete_capabilities
return
;;
- --cidfile|--env-file|--init-path|--label-file)
+ --cidfile|--env-file|--init-path|--label-file|--pod-id-file)
_filedir
return
;;
@@ -3097,6 +3098,7 @@ _podman_pod_create() {
--dns-opt
--dns-search
--infra-command
+ --infra-conmon-pidfile
--infra-image
--ip
--label-file
@@ -3223,6 +3225,7 @@ _podman_pod_restart() {
_podman_pod_rm() {
local options_with_args="
+ --pod-id-file
"
local boolean_options="
@@ -3250,6 +3253,7 @@ _podman_pod_rm() {
_podman_pod_start() {
local options_with_args="
+ --pod-id-file
"
local boolean_options="
@@ -3275,6 +3279,7 @@ _podman_pod_stop() {
local options_with_args="
-t
--time
+ --pod-id-file
"
local boolean_options="
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index 541cf2f54..c8ec766e7 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -167,26 +167,50 @@ env:
### `docs` Task
-Builds swagger API documentation YAML and uploads to google storage for both
-PR's (for testing the process) and after a merge into any branch. For PR's
+Builds the swagger API documentation YAML and uploads it to Google Storage (an online
+service for storing unstructured data) for both
+PRs (for testing the process) and the master branch. For PRs,
the YAML is uploaded into a [dedicated short-pruning cycle
-bucket.](https://storage.googleapis.com/libpod-pr-releases/) For branches,
-a [separate bucket is
-used.](https://storage.googleapis.com/libpod-master-releases)
-In both cases the filename includes the source
-PR number or branch name.
-
-***Note***: [The online documentation](http://docs.podman.io/en/latest/_static/api.html)
-is presented through javascript on the client-side. This requires CORS to be properly
-configured on the bucket, for the `http://docs.podman.io` origin. Please see
-[Configuring CORS on a bucket](https://cloud.google.com/storage/docs/configuring-cors#configure-cors-bucket)
-for details. This may be performed by anybody with admin access to the google storage bucket,
-using the following JSON:
+bucket](https://storage.googleapis.com/libpod-pr-releases/) for testing purposes
+only. For the master branch, a [separate bucket is
+used](https://storage.googleapis.com/libpod-master-releases) which provides the
+content rendered on [the API Reference page](https://docs.podman.io/en/latest/_static/api.html).
+
+The online API reference is rendered on the client by javascript. To prevent hijacking
+of the client by malicious data, the [javascript utilises CORS](https://cloud.google.com/storage/docs/cross-origin).
+This CORS metadata is served by `https://storage.googleapis.com` when configured correctly.
+It will appear in [the request and response headers from the
+client](https://cloud.google.com/storage/docs/configuring-cors#troubleshooting) when accessing
+the API reference page.
+
+However, when the CORS metadata is missing or incorrectly configured, clients will receive an
+error message similar to:
+
+![Javascript Stack Trace Image](swagger_stack_trace.png)
+
+For documentation built by Read The Docs from the master branch, CORS metadata is
+set on the `libpod-master-releases` storage bucket. Viewing or setting the CORS
+metadata on the bucket requires having locally [installed and
+configured the google-cloud SDK](https://cloud.google.com/sdk/docs). It also requires having
+admin access to the google-storage bucket. Contact a project owner for help if you are
+unsure of your permissions or need help resolving an error similar to the one pictured above.
+
+Assuming the SDK is installed, and you have the required admin access, the following command
+will display the current CORS metadata:
+
+```
+gsutil cors get gs://libpod-master-releases
+```
+
+To function properly (i.e. to allow clients to trust content from `storage.googleapis.com`), the following
+metadata JSON should be used. Following the JSON is an example of the command used to set this
+metadata on the libpod-master-releases bucket. For additional information about configuring CORS,
+please refer to [the google-storage documentation](https://cloud.google.com/storage/docs/configuring-cors).
```JSON
[
{
- "origin": ["http://docs.podman.io"],
+ "origin": ["http://docs.podman.io", "https://docs.podman.io"],
"responseHeader": ["Content-Type"],
"method": ["GET"],
"maxAgeSeconds": 600
@@ -194,6 +218,14 @@ using the following JSON:
]
```
+```
+gsutil cors set /path/to/file.json gs://libpod-master-releases
+```
+
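+As an illustrative spot-check (the object path below is only an example, not a real file name),
+a request that includes an `Origin` header should come back with an
+`Access-Control-Allow-Origin` response header when the CORS metadata is working:
+
+```
+# NOTE: the object path is illustrative; substitute a real object from the bucket
+curl -s -D - -o /dev/null \
+    -H "Origin: https://docs.podman.io" \
+    "https://storage.googleapis.com/libpod-master-releases/swagger-latest-master.yaml"
+```
+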
+***Note:*** The CORS metadata does _NOT_ change when the `docs` task uploads a new swagger YAML
+file. Therefore, if it stops functioning or appears misconfigured, either a person altered it or
+changes were made to the referring site (e.g. `docs.podman.io`).
+
## Base-images
Base-images are VM disk-images specially prepared for executing as GCE VMs.
diff --git a/contrib/cirrus/check_image.sh b/contrib/cirrus/check_image.sh
index 5423f67d6..0d33e55bf 100755
--- a/contrib/cirrus/check_image.sh
+++ b/contrib/cirrus/check_image.sh
@@ -6,7 +6,7 @@ source $(dirname $0)/lib.sh
EVIL_UNITS="$($CIRRUS_WORKING_DIR/$PACKER_BASE/systemd_banish.sh --list)"
-req_env_var PACKER_BUILDER_NAME TEST_REMOTE_CLIENT EVIL_UNITS OS_RELEASE_ID
+req_env_var PACKER_BUILDER_NAME TEST_REMOTE_CLIENT EVIL_UNITS OS_RELEASE_ID CG_FS_TYPE
NFAILS=0
echo "Validating VM image"
@@ -22,7 +22,8 @@ item_test 'Minimum available memory' $MEM_FREE -ge $MIN_MEM_MB || let "NFAILS+=1
# We're testing a custom-built podman; make sure there isn't a distro-provided
# binary anywhere; that could potentially taint our results.
-item_test "remove_packaged_podman_files() did it's job" -z "$(type -P podman)" || let "NFAILS+=1"
+remove_packaged_podman_files
+item_test "remove_packaged_podman_files() does its job" -z "$(type -P podman)" || let "NFAILS+=1"
# Integration Tests require varlink in Fedora
item_test "The varlink executable is present" -x "$(type -P varlink)" || let "NFAILS+=1"
@@ -39,8 +40,10 @@ for REQ_UNIT in google-accounts-daemon.service \
google-shutdown-scripts.service \
google-startup-scripts.service
do
- item_test "required $REQ_UNIT enabled" \
- "$(systemctl list-unit-files --no-legend $REQ_UNIT)" = "$REQ_UNIT enabled" || let "NFAILS+=1"
+ # enabled/disabled appears at the end of the line; on some Ubuntu versions it appears twice
+ service_status=$(systemctl list-unit-files --no-legend $REQ_UNIT | tac -s ' ' | head -1)
+ item_test "required $REQ_UNIT status is enabled" \
+ "$service_status" = "enabled" || let "NFAILS+=1"
done
for evil_unit in $EVIL_UNITS
@@ -50,19 +53,28 @@ do
item_test "No $evil_unit unit is present or active:" "$unit_status" -ne "0" || let "NFAILS+=1"
done
-if [[ "$OS_RELEASE_ID" == "ubuntu" ]] && [[ -x "/usr/lib/cri-o-runc/sbin/runc" ]]
-then
- SAMESAME=$(diff --brief /usr/lib/cri-o-runc/sbin/runc /usr/bin/runc &> /dev/null; echo $?)
- item_test "On ubuntu /usr/bin/runc is /usr/lib/cri-o-runc/sbin/runc" "$SAMESAME" -eq "0" || let "NFAILS+=1"
-fi
-
-if [[ "$OS_RELEASE_ID" == "ubuntu" ]]
-then
- item_test "On ubuntu, no periodic apt crap is enabled" -z "$(egrep $PERIODIC_APT_RE /etc/apt/apt.conf.d/*)"
-fi
-
echo "Checking items specific to ${PACKER_BUILDER_NAME}${BUILT_IMAGE_SUFFIX}"
case "$PACKER_BUILDER_NAME" in
+ ubuntu*)
+ item_test "On ubuntu, no periodic apt crap is enabled" -z "$(egrep $PERIODIC_APT_RE /etc/apt/apt.conf.d/*)"
+ ;;
+ fedora*)
+ # Only runc -OR- crun should be installed, never both
+ case "$CG_FS_TYPE" in
+ tmpfs)
+ HAS=runc
+ HAS_NOT=crun
+ ;;
+ cgroup2fs)
+ HAS=crun
+ HAS_NOT=runc
+ ;;
+ esac
+ HAS_RC=$(rpm -qV $HAS &> /dev/null; echo $?)
+ HAS_NOT_RC=$(rpm -qV $HAS_NOT &> /dev/null; echo $?)
+ item_test "With a cgroups-fs type $CG_FS_TYPE, the $HAS package is installed" $HAS_RC -eq 0
+ item_test "With a cgroups-fs type $CG_FS_TYPE, the $HAS_NOT package is not installed" $HAS_NOT_RC -ne 0
+ ;;
xfedora*)
echo "Kernel Command-line: $(cat /proc/cmdline)"
item_test \
diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh
index 1aef678d4..33e9fbc6b 100755
--- a/contrib/cirrus/integration_test.sh
+++ b/contrib/cirrus/integration_test.sh
@@ -6,6 +6,11 @@ source $(dirname $0)/lib.sh
req_env_var GOSRC SCRIPT_BASE OS_RELEASE_ID OS_RELEASE_VER CONTAINER_RUNTIME VARLINK_LOG
+LOCAL_OR_REMOTE=local
+if [[ "$TEST_REMOTE_CLIENT" = "true" ]]; then
+ LOCAL_OR_REMOTE=remote
+fi
+
# Our name must be of the form xxxx_test or xxxx_test.sh, where xxxx is
# the test suite to run; currently (2019-05) the only option is 'integration'
# but pr2947 intends to add 'system'.
@@ -34,7 +39,7 @@ case "$SPECIALMODE" in
req_env_var ROOTLESS_USER
ssh $ROOTLESS_USER@localhost \
-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- -o CheckHostIP=no $GOSRC/$SCRIPT_BASE/rootless_test.sh ${TESTSUITE}
+ -o CheckHostIP=no $GOSRC/$SCRIPT_BASE/rootless_test.sh ${TESTSUITE} ${LOCAL_OR_REMOTE}
;;
endpoint)
make
@@ -52,12 +57,8 @@ case "$SPECIALMODE" in
make
make install PREFIX=/usr ETCDIR=/etc
make test-binaries
- if [[ "$TEST_REMOTE_CLIENT" == "true" ]]
- then
- make remote${TESTSUITE} VARLINK_LOG=$VARLINK_LOG
- else
- make local${TESTSUITE}
- fi
+ make .install.bats
+ make ${LOCAL_OR_REMOTE}${TESTSUITE} PODMAN_SERVER_LOG=$PODMAN_SERVER_LOG
;;
*)
die 110 "Unsupported \$SPECIALMODE: $SPECIALMODE"
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index cc5a3ffa7..66e8060cf 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -39,6 +39,8 @@ PACKER_BASE=${PACKER_BASE:-./contrib/cirrus/packer}
# Important filepaths
SETUP_MARKER_FILEPATH="${SETUP_MARKER_FILEPATH:-/var/tmp/.setup_environment_sh_complete}"
AUTHOR_NICKS_FILEPATH="${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/git_authors_to_irc_nicks.csv"
+# Downloaded, but not installed packages.
+PACKAGE_DOWNLOAD_DIR=/var/cache/download
# Log remote-client system test varlink output here
export VARLINK_LOG=/var/tmp/varlink.log
@@ -422,7 +424,7 @@ remove_packaged_podman_files() {
then
LISTING_CMD="$SUDO dpkg-query -L podman"
else
- LISTING_CMD='$SUDO rpm -ql podman'
+ LISTING_CMD="$SUDO rpm -ql podman"
fi
# yum/dnf/dpkg may list system directories, only remove files
@@ -437,6 +439,14 @@ remove_packaged_podman_files() {
sync && echo 3 > /proc/sys/vm/drop_caches
}
+# The version of CRI-O and Kubernetes must always match
+get_kubernetes_version(){
+ # TODO: Look up the kube RPM/DEB version installed, or in $PACKAGE_DOWNLOAD_DIR
+ # and retrieve the major-minor version directly.
+ local KUBERNETES_VERSION="1.15"
+ echo "$KUBERNETES_VERSION"
+}
+
canonicalize_image_names() {
req_env_var IMGNAMES
echo "Adding all current base images to \$IMGNAMES for timestamp update"
@@ -479,6 +489,7 @@ _finalize() {
fi
echo "Re-initializing so next boot does 'first-boot' setup again."
cd /
+ $SUDO rm -rf $GOPATH/src # Actual source will be cloned at runtime
$SUDO rm -rf /var/lib/cloud/instanc*
$SUDO rm -rf /root/.ssh/*
$SUDO rm -rf /etc/ssh/*key*
diff --git a/contrib/cirrus/packer/fedora_packaging.sh b/contrib/cirrus/packer/fedora_packaging.sh
index e80d48bc8..aecaaef93 100644
--- a/contrib/cirrus/packer/fedora_packaging.sh
+++ b/contrib/cirrus/packer/fedora_packaging.sh
@@ -11,6 +11,8 @@ echo "Updating/Installing repos and packages for $OS_REL_VER"
source $GOSRC/$SCRIPT_BASE/lib.sh
+req_env_var GOSRC SCRIPT_BASE BIGTO INSTALL_AUTOMATION_VERSION FEDORA_BASE_IMAGE PRIOR_FEDORA_BASE_IMAGE
+
# Pre-req. to install automation tooing
$LILTO $SUDO dnf install -y git
@@ -35,7 +37,7 @@ fi
$BIGTO ooe.sh $SUDO dnf update -y
-REMOVE_PACKAGES=()
+REMOVE_PACKAGES=(runc)
INSTALL_PACKAGES=(\
autoconf
automake
@@ -50,8 +52,11 @@ INSTALL_PACKAGES=(\
containernetworking-plugins
containers-common
criu
+ crun
+ curl
device-mapper-devel
dnsmasq
+ e2fsprogs-devel
emacs-nox
file
findutils
@@ -60,16 +65,26 @@ INSTALL_PACKAGES=(\
gcc
git
glib2-devel
+ glibc-devel
glibc-static
gnupg
go-md2man
golang
+ gpgme
gpgme-devel
+ grubby
+ hostname
iproute
iptables
jq
+ krb5-workstation
+ libassuan
libassuan-devel
+ libblkid-devel
libcap-devel
+ libffi-devel
+ libgpg-error-devel
+ libguestfs-tools
libmsi1
libnet
libnet-devel
@@ -79,56 +94,60 @@ INSTALL_PACKAGES=(\
libselinux-devel
libtool
libvarlink-util
+ libxml2-devel
+ libxslt-devel
lsof
make
+ mlocate
msitools
+ nfs-utils
nmap-ncat
+ openssl
+ openssl-devel
ostree-devel
pandoc
+ pkgconfig
podman
+ policycoreutils
procps-ng
protobuf
protobuf-c
protobuf-c-devel
protobuf-devel
- python
+ python2
+ python3-PyYAML
python3-dateutil
python3-psutil
python3-pytoml
+ python3-libsemanage
+ python3-libselinux
+ python3-libvirt
+ redhat-rpm-config
+ rpcbind
rsync
+ sed
selinux-policy-devel
skopeo
skopeo-containers
slirp4netns
+ socat
+ tar
unzip
vim
wget
which
xz
zip
+ zlib-devel
+)
+DOWNLOAD_PACKAGES=(\
+ "cri-o-$(get_kubernetes_version)*"
+ cri-tools
+ "kubernetes-$(get_kubernetes_version)*"
+ runc
+ oci-umount
+ parallel
)
-
-case "$OS_RELEASE_VER" in
- 30)
- INSTALL_PACKAGES+=(\
- atomic-registries
- golang-github-cpuguy83-go-md2man
- python2-future
- runc
- )
- REMOVE_PACKAGES+=(crun)
- ;;
- 31)
- INSTALL_PACKAGES+=(crun)
- REMOVE_PACKAGES+=(runc)
- ;;
- 32)
- INSTALL_PACKAGES+=(crun)
- REMOVE_PACKAGES+=(runc)
- ;;
- *)
- bad_os_id_ver ;;
-esac
echo "Installing general build/test dependencies for Fedora '$OS_RELEASE_VER'"
$BIGTO ooe.sh $SUDO dnf install -y ${INSTALL_PACKAGES[@]}
@@ -136,6 +155,18 @@ $BIGTO ooe.sh $SUDO dnf install -y ${INSTALL_PACKAGES[@]}
[[ ${#REMOVE_PACKAGES[@]} -eq 0 ]] || \
$LILTO ooe.sh $SUDO dnf erase -y ${REMOVE_PACKAGES[@]}
-export GOPATH="$(mktemp -d)"
-trap "$SUDO rm -rf $GOPATH" EXIT
-ooe.sh $SUDO $GOSRC/hack/install_catatonit.sh
+if [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then
+ echo "Downloading packages for optional installation at runtime, as needed."
+ # Required for cri-o
+ ooe.sh $SUDO dnf -y module enable cri-o:$(get_kubernetes_version)
+ $SUDO mkdir -p "$PACKAGE_DOWNLOAD_DIR"
+ cd "$PACKAGE_DOWNLOAD_DIR"
+ $LILTO ooe.sh $SUDO dnf download -y --resolve ${DOWNLOAD_PACKAGES[@]}
+ ls -la "$PACKAGE_DOWNLOAD_DIR/"
+fi
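+# Note (illustrative): nothing in this script installs the downloaded packages; they are cached
+# so they can be installed later at runtime as needed, e.g. dnf install -y $PACKAGE_DOWNLOAD_DIR/<name>*.rpm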
+
+echo "Installing runtime tooling"
+# Save some runtime by having these already available
+cd $GOSRC
+$SUDO make install.tools
+$SUDO $GOSRC/hack/install_catatonit.sh
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
index 3830b3bc4..25b568e8a 100644
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ b/contrib/cirrus/packer/fedora_setup.sh
@@ -12,11 +12,11 @@ req_env_var SCRIPT_BASE PACKER_BASE INSTALL_AUTOMATION_VERSION PACKER_BUILDER_NA
workaround_bfq_bug
-# Do not enable update-stesting on the previous Fedora release
-if [[ "$FEDORA_BASE_IMAGE" =~ "${OS_RELEASE_ID}-cloud-base-${OS_RELEASE_VER}" ]]; then
- DISABLE_UPDATES_TESTING=0
-else
+# Do not enable updates-testing on the previous Fedora release
+if [[ "$PRIOR_FEDORA_BASE_IMAGE" =~ "${OS_RELEASE_ID}-cloud-base-${OS_RELEASE_VER}" ]]; then
DISABLE_UPDATES_TESTING=1
+else
+ DISABLE_UPDATES_TESTING=0
fi
bash $PACKER_BASE/fedora_packaging.sh
diff --git a/contrib/cirrus/packer/ubuntu_packaging.sh b/contrib/cirrus/packer/ubuntu_packaging.sh
index fd0280230..09f9aab9f 100644
--- a/contrib/cirrus/packer/ubuntu_packaging.sh
+++ b/contrib/cirrus/packer/ubuntu_packaging.sh
@@ -11,6 +11,8 @@ echo "Updating/Installing repos and packages for $OS_REL_VER"
source $GOSRC/$SCRIPT_BASE/lib.sh
+req_env_var GOSRC SCRIPT_BASE BIGTO SUDOAPTGET INSTALL_AUTOMATION_VERSION
+
echo "Updating/configuring package repositories."
$BIGTO $SUDOAPTGET update
@@ -99,6 +101,7 @@ INSTALL_PACKAGES=(\
protobuf-c-compiler
protobuf-compiler
python-protobuf
+ python2
python3-dateutil
python3-pip
python3-psutil
@@ -118,6 +121,11 @@ INSTALL_PACKAGES=(\
zip
zlib1g-dev
)
+DOWNLOAD_PACKAGES=(\
+ cri-o-$(get_kubernetes_version)
+ cri-tools
+ parallel
+)
# These aren't resolvable on Ubuntu 20
if [[ "$OS_RELEASE_VER" -le 19 ]]; then
@@ -137,16 +145,15 @@ echo "Installing general testing and system dependencies"
$LILTO ooe.sh $SUDOAPTGET update
$BIGTO ooe.sh $SUDOAPTGET install ${INSTALL_PACKAGES[@]}
-export GOPATH="$(mktemp -d)"
-trap "$SUDO rm -rf $GOPATH" EXIT
-echo "Installing cataonit and libseccomp.sudo"
-cd $GOSRC
-ooe.sh $SUDO hack/install_catatonit.sh
-ooe.sh $SUDO make install.libseccomp.sudo
-
-CRIO_RUNC_PATH="/usr/lib/cri-o-runc/sbin/runc"
-if $SUDO dpkg -L cri-o-runc | grep -m 1 -q "$CRIO_RUNC_PATH"
-then
- echo "Linking $CRIO_RUNC_PATH to /usr/bin/runc for ease of testing."
- $SUDO ln -f "$CRIO_RUNC_PATH" "/usr/bin/runc"
+if [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then
+ echo "Downloading packages for optional installation at runtime, as needed."
+ $SUDO ln -s /var/cache/apt/archives "$PACKAGE_DOWNLOAD_DIR"
+ $LILTO ooe.sh $SUDOAPTGET install --download-only ${DOWNLOAD_PACKAGES[@]}
+ ls -la "$PACKAGE_DOWNLOAD_DIR/"
fi
+
+echo "Installing runtime tooling"
+cd $GOSRC
+$SUDO hack/install_catatonit.sh
+$SUDO make install.libseccomp.sudo
+$SUDO make install.tools
diff --git a/contrib/cirrus/rootless_test.sh b/contrib/cirrus/rootless_test.sh
index 3f45aac84..9e1b1d911 100755
--- a/contrib/cirrus/rootless_test.sh
+++ b/contrib/cirrus/rootless_test.sh
@@ -2,14 +2,6 @@
set -e
-remote=0
-
-# The TEST_REMOTE_CLIENT environment variable decides whether
-# to test varlink
-if [[ "$TEST_REMOTE_CLIENT" == "true" ]]; then
- remote=1
-fi
-
source $(dirname $0)/lib.sh
if [[ "$UID" == "0" ]]
@@ -18,11 +10,8 @@ then
exit 1
fi
-# Which set of tests to run; possible alternative is "system"
-TESTSUITE=integration
-if [[ -n "$*" ]]; then
- TESTSUITE="$1"
-fi
+TESTSUITE=${1?Missing TESTSUITE argument (arg1)}
+LOCAL_OR_REMOTE=${2?Missing LOCAL_OR_REMOTE argument (arg2)}
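+# Example invocation (illustrative), mirroring how integration_test.sh calls this script:
+#   rootless_test.sh integration local
+#   rootless_test.sh integration remote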
# Ensure environment setup correctly
req_env_var GOSRC ROOTLESS_USER
@@ -31,7 +20,6 @@ echo "."
echo "Hello, my name is $USER and I live in $PWD can I be your friend?"
echo "."
-export PODMAN_VARLINK_ADDRESS=unix:/tmp/podman-$(id -u)
show_env_vars
set -x
@@ -39,8 +27,4 @@ cd "$GOSRC"
make
make varlink_generate
make test-binaries
-if [ $remote -eq 0 ]; then
- make local${TESTSUITE}
-else
- make remote${TESTSUITE}
-fi
+make ${LOCAL_OR_REMOTE}${TESTSUITE}
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 25b7ff941..323e7c35b 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -39,6 +39,17 @@ done
cd "${GOSRC}/"
case "${OS_RELEASE_ID}" in
ubuntu)
+ apt-get update
+ apt-get install -y containers-common
+ sed -ie 's/^\(# \)\?apparmor_profile =.*/apparmor_profile = ""/' /etc/containers/containers.conf
+ if [[ "$OS_RELEASE_VER" == "19" ]]; then
+ apt-get purge -y --auto-remove golang*
+ apt-get install -y golang-1.13
+ ln -s /usr/lib/go-1.13/bin/go /usr/bin/go
+ fi
+ if [[ "$OS_RELEASE_VER" == "20" ]]; then
+ apt-get install -y python-is-python3
+ fi
;;
fedora)
# All SELinux distros need this for systemd-in-a-container
@@ -78,14 +89,6 @@ case "$CG_FS_TYPE" in
warn "Forcing testing with crun instead of runc"
X=$(echo "export OCI_RUNTIME=/usr/bin/crun" | \
tee -a /etc/environment) && eval "$X" && echo "$X"
-
- if [[ "$OS_RELEASE_ID" == "fedora" ]]; then
- warn "Upgrading to the latest crun"
- # Normally not something to do for stable testing
- # but crun is new, and late-breaking fixes may be required
- # on short notice
- dnf update -y crun containers-common
- fi
;;
*)
die 110 "Unsure how to handle cgroup filesystem type '$CG_FS_TYPE'"
diff --git a/contrib/cirrus/swagger_stack_trace.png b/contrib/cirrus/swagger_stack_trace.png
new file mode 100644
index 000000000..6aa063bab
--- /dev/null
+++ b/contrib/cirrus/swagger_stack_trace.png
Binary files differ
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 8d3cba612..260de7b20 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -77,8 +77,9 @@ BuildRequires: systemd-devel
Requires: skopeo-containers
Requires: containernetworking-plugins >= 0.6.0-3
Requires: iptables
-%if 0%{?rhel} <= 7
+%if 0%{?rhel} < 8 || 0%{?centos} < 8
Requires: container-selinux
+Requires: runc
%else
%if 0%{?rhel} || 0%{?centos}
Requires: runc
diff --git a/docs/Readme.md b/docs/Readme.md
index 987a5b8e4..9d3b9d06f 100644
--- a/docs/Readme.md
+++ b/docs/Readme.md
@@ -30,10 +30,26 @@ link on that page.
## API Reference
The [latest online documentation](http://docs.podman.io/en/latest/_static/api.html) is
-automatically generated from committed upstream sources. There is a short-duration
-cache involved, in case old content or an error is returned, try clearing your browser
-cache or returning to the site after 10-30 minutes.
-
-***Maintainers Note***: Please refer to [the Cirrus-CI tasks
-documentation](../contrib/cirrus/README.md#docs-task) for
-important operational details.
+automatically generated by two cooperating automation systems based on committed upstream
+source code. First, [the Cirrus-CI docs task](../contrib/cirrus/README.md#docs-task) builds
+`pkg/api/swagger.yaml` and uploads it to a public-facing location (a Google Storage Bucket -
+an online service for storing unstructured data). Second, [Read The Docs](readthedocs.com)
+reacts to the github.com repository change, building the content for the [libpod documentation
+site](https://podman.readthedocs.io/). For the API section, this site includes
+some javascript which consumes the uploaded `swagger.yaml` file directly from the Google
+Storage Bucket.
+
+Since multiple systems and local caches are involved, it's possible that updates to
+documentation (especially the swagger/API docs) will lag by ten or so minutes. However,
+because the client (i.e. your web browser) is fetching content from multiple locations that
+do not share a common domain, accessing the API section may show a stack-trace similar to
+the following:
+
+![Javascript Stack Trace Image](../contrib/cirrus/swagger_stack_trace.png)
+
+If reloading the page or clearing your local cache does not fix the problem, it is
+likely caused by broken metadata needed to protect clients from cross-site-scripting
+style attacks. Please [notify a maintainer](https://github.com/containers/libpod#communications)
+so they may investigate how/why the swagger.yaml file's CORS-metadata is incorrect. See
+[the Cirrus-CI tasks documentation](../contrib/cirrus/README.md#docs-task) for
+details regarding this situation.
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index a69ef04d1..dbc835920 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -593,6 +593,10 @@ Tune the container's pids limit. Set `0` to have unlimited pids for the containe
Run container in an existing pod. If you want Podman to make the pod for you, preference the pod name with `new:`.
To make a pod with more granular options, use the `podman pod create` command before creating a container.
+**--pod-id-file**=*path*
+
+Run container in an existing pod and read the pod's ID from the specified file. If a container is run within a pod, and the pod has an infra-container, the infra-container will be started before the container is.
+
**--privileged**=*true|false*
Give extended privileges to this container. The default is *false*.
diff --git a/docs/source/markdown/podman-events.1.md b/docs/source/markdown/podman-events.1.md
index a05047684..abfc6e9c1 100644
--- a/docs/source/markdown/podman-events.1.md
+++ b/docs/source/markdown/podman-events.1.md
@@ -15,6 +15,8 @@ value to `file`. Only `file` and `journald` are accepted. A `none` logger is al
available but this logging mechanism completely disables events; nothing will be reported by
`podman events`.
+By default, streaming mode is used, printing new events as they occur. Previous events can be listed via `--since` and `--until`.
+
The *container* event type will report the follow statuses:
* attach
* checkpoint
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 72031b19b..2facd754c 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -26,10 +26,7 @@ Use the name of the container for the start, stop, and description in the unit f
**--new**
-Create a new container via podman-run instead of starting an existing one. This option relies on container configuration files, which may not map directly to podman CLI flags; please review the generated output carefully before placing in production.
-Since we use systemd `Type=forking` service, using this option will force the container run with the detached param `-d`.
-
-Note: Generating systemd unit files with `--new` flag is not yet supported for pods.
+Using this flag will yield unit files that do not expect containers and pods to exist. Instead, new containers and pods are created based on their configuration files. The unit files are created on a best-effort basis and may need to be further edited; please review the generated files carefully before using them in production.
**--time**, **-t**=*value*
diff --git a/docs/source/markdown/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index a69b311eb..79c7ff640 100644
--- a/docs/source/markdown/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
@@ -12,7 +12,9 @@ and password. If the registry is not specified, the first registry under [regist
from registries.conf will be used. **podman login** reads in the username and password from STDIN.
The username and password can also be set using the **username** and **password** flags.
The path of the authentication file can be specified by the user by setting the **authfile**
-flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
+flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**. If there is a valid
+username and password in the **authfile**, Podman will use those existing credentials if the user does not pass in a username.
+If those credentials are not present, Podman will then use any existing credentials found in **$HOME/.docker/config.json**.
**podman [GLOBAL OPTIONS]**
diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index 489c9b32e..de6b600f0 100644
--- a/docs/source/markdown/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
@@ -47,6 +47,10 @@ Set a hostname to the pod
Create an infra container and associate it with the pod. An infra container is a lightweight container used to coordinate the shared kernel namespace of a pod. Default: true.
+**--infra-conmon-pidfile**=*file*
+
+Write the pid of the infra container's **conmon** process to a file. As **conmon** runs in a separate process from Podman, this is necessary when using systemd to manage Podman containers and pods.
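+
+An illustrative example (the path is hypothetical):
+
+```
+$ podman pod create --name mypod --infra-conmon-pidfile /tmp/mypod-conmon.pid
+```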
+
**--infra-command**=*command*
The command that will be run to start the infra container. Default: "/pause".
diff --git a/docs/source/markdown/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index 14da2071f..95e7ab002 100644
--- a/docs/source/markdown/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
@@ -31,6 +31,10 @@ The latest option is not supported on the remote client.
Stop running containers and delete all stopped containers before removal of pod.
+**--pod-id-file**
+
+Read pod ID from the specified file and remove the pod. Can be specified multiple times.
+
## EXAMPLE
podman pod rm mywebserverpod
@@ -43,6 +47,8 @@ podman pod rm -f -a
podman pod rm -fa
+podman pod rm --pod-id-file /path/to/id/file
+
## SEE ALSO
podman-pod(1)
diff --git a/docs/source/markdown/podman-pod-start.1.md b/docs/source/markdown/podman-pod-start.1.md
index 29960d6aa..6c6cfa2cf 100644
--- a/docs/source/markdown/podman-pod-start.1.md
+++ b/docs/source/markdown/podman-pod-start.1.md
@@ -22,6 +22,10 @@ Instead of providing the pod name or ID, start the last created pod.
The latest option is not supported on the remote client.
+**--pod-id-file**
+
+Read pod ID from the specified file and start the pod. Can be specified multiple times.
+
## EXAMPLE
podman pod start mywebserverpod
@@ -32,6 +36,7 @@ podman pod start --latest
podman pod start --all
+podman pod start --pod-id-file /path/to/id/file
## SEE ALSO
podman-pod(1), podman-pod-stop(1), podman-start(1)
diff --git a/docs/source/markdown/podman-pod-stop.1.md b/docs/source/markdown/podman-pod-stop.1.md
index b5e7aef7d..7ce9ff941 100644
--- a/docs/source/markdown/podman-pod-stop.1.md
+++ b/docs/source/markdown/podman-pod-stop.1.md
@@ -31,6 +31,10 @@ The latest option is not supported on the remote client.
Timeout to wait before forcibly stopping the containers in the pod.
+**--pod-id-file**
+
+Read pod ID from the specified file and stop the pod. Can be specified multiple times.
+
## EXAMPLE
Stop a pod called *mywebserverpod*
@@ -62,6 +66,13 @@ $ podman pod stop --all
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
```
+Stop two pods via --pod-id-file
+```
+$ podman pod stop --pod-id-file file1 --pod-id-file file2
+19456b4cd557eaf9629825113a552681a6013f8c8cad258e36ab825ef536e818
+cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
+```
+
Stop all pods with a timeout of 1 second.
```
$ podman pod stop -a -t 1
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 02db8b205..22f7cae09 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -605,6 +605,10 @@ Run container in an existing pod. If you want Podman to make the pod for you, pr
To make a pod with more granular options, use the **podman pod create** command before creating a container.
If a container is run with a pod, and the pod has an infra-container, the infra-container will be started before the container is.
+**--pod-id-file**=*path*
+
+Run container in an existing pod and read the pod's ID from the specified file. If a container is run within a pod, and the pod has an infra-container, the infra-container will be started before the container is.
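+
+An illustrative sketch of combining this with **podman pod create** (which prints the new pod's
+ID on stdout):
+
+```
+$ podman pod create --name mypod > /tmp/mypod.id
+$ podman run --pod-id-file /tmp/mypod.id alpine true
+```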
+
**--privileged**=**true**|**false**
Give extended privileges to this container. The default is **false**.
diff --git a/go.mod b/go.mod
index 709862a6b..bacc8d72f 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
github.com/containernetworking/plugins v0.8.6
github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224
- github.com/containers/common v0.12.0
+ github.com/containers/common v0.13.0
github.com/containers/conmon v2.0.17+incompatible
github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0
github.com/containers/psgo v1.5.1
@@ -33,7 +33,7 @@ require (
github.com/gorilla/schema v1.1.0
github.com/hashicorp/go-multierror v1.0.0
github.com/hpcloud/tail v1.0.0
- github.com/json-iterator/go v1.1.9
+ github.com/json-iterator/go v1.1.10
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
github.com/onsi/ginkgo v1.12.3
github.com/onsi/gomega v1.10.1
@@ -51,7 +51,7 @@ require (
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.6.0
+ github.com/stretchr/testify v1.6.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber/jaeger-client-go v2.23.1+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index c6cf39ee1..9ac2c82c9 100644
--- a/go.sum
+++ b/go.sum
@@ -70,8 +70,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224 h1:EqwBZRqyUYvU7JOmmSSPviSaAoUP1wN0cefXXDZ9ATo=
github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224/go.mod h1:5ZkWjOuK90yl55L5R+purJNLfUo0VUr8pstJazNtYck=
github.com/containers/common v0.11.2/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
-github.com/containers/common v0.12.0 h1:LR/sYyzFa22rFhfu6J9dEYhVkrWjagUigz/ewHhHL9s=
-github.com/containers/common v0.12.0/go.mod h1:PKlahPDnQQYcXuIw5qq8mq6yNuCHBtgABphzy6pN0iI=
+github.com/containers/common v0.13.0 h1:+7FHpPNz3YR2YcVIVNnPg2sVrXytxNgNHbd3n7SosL0=
+github.com/containers/common v0.13.0/go.mod h1:LJlijBz9zi7pJqZvlbxCOsw6qNn31rzb7Zo6NBJNQxU=
github.com/containers/conmon v2.0.17+incompatible h1:8BooocmNIwjOwAUGAoDD6fi3u0RrFyQ/fDkQzdiVtrI=
github.com/containers/conmon v2.0.17+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
@@ -248,8 +248,8 @@ github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBv
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -318,8 +318,6 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.12.2 h1:Ke9m3h2Hu0wsZ45yewCqhYr3Z+emcNTuLY2nMWCkrSI=
-github.com/onsi/ginkgo v1.12.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.12.3 h1:+RYp9QczoWz9zfUyLP/5SLXQVhfr6gZOoKGfQqHuLZQ=
github.com/onsi/ginkgo v1.12.3/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@@ -445,6 +443,8 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
diff --git a/hack/install_bats.sh b/hack/install_bats.sh
new file mode 100755
index 000000000..00ded07a9
--- /dev/null
+++ b/hack/install_bats.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+die() { echo "${1:-No error message given} (from $(basename $0))"; exit 1; }
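+
+# Usage (illustrative): VERSION=v1.1.0 hack/install_bats.sh
+# VERSION may be any bats-core tag or commit; when unset, the clone stays at its default HEAD.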
+
+buildDir=$(mktemp -d)
+git clone https://github.com/bats-core/bats-core $buildDir
+
+pushd $buildDir
+pwd
+git reset --hard ${VERSION}
+./install.sh /usr/local
+popd
+
+rm -rf $buildDir
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 43e873bd6..f6fc3c1a4 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1209,13 +1209,35 @@ func (c *Container) stop(timeout uint) error {
}
}
+ // Check if conmon is still alive.
+ // If it is not, we won't be getting an exit file.
+ conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
+ if err != nil {
+ return err
+ }
+
if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil {
return err
}
+ c.newContainerEvent(events.Stop)
+
c.state.PID = 0
c.state.ConmonPID = 0
c.state.StoppedByUser = true
+
+ if !conmonAlive {
+ // Conmon is dead, so we can't expect an exit code.
+ c.state.ExitCode = -1
+ c.state.FinishedTime = time.Now()
+ c.state.State = define.ContainerStateStopped
+ if err := c.save(); err != nil {
+ logrus.Errorf("Error saving container %s status: %v", c.ID(), err)
+ }
+
+ return errors.Wrapf(define.ErrConmonDead, "container %s conmon process missing, cannot retrieve exit code", c.ID())
+ }
+
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
}
@@ -1225,8 +1247,6 @@ func (c *Container) stop(timeout uint) error {
return err
}
- c.newContainerEvent(events.Stop)
-
return nil
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index d08e012a6..9afe11b2b 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -79,7 +79,8 @@ func (c *Container) prepare() error {
go func() {
defer wg.Done()
// Set up network namespace if not already set up
- if c.config.CreateNetNS && c.state.NetNS == nil && !c.config.PostConfigureNetNS {
+ noNetNS := c.state.NetNS == nil
+ if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS {
netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
if createNetNSErr != nil {
return
@@ -94,7 +95,7 @@ func (c *Container) prepare() error {
}
// handle rootless network namespace setup
- if c.state.NetNS != nil && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS {
+ if noNetNS && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS {
createNetNSErr = c.runtime.setupRootlessNetNS(c)
}
}()
@@ -392,7 +393,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
for _, i := range c.config.Spec.Linux.Namespaces {
- if i.Type == spec.UTSNamespace {
+ if i.Type == spec.UTSNamespace && i.Path == "" {
hostname := c.Hostname()
g.SetHostname(hostname)
g.AddProcessEnv("HOSTNAME", hostname)
@@ -591,7 +592,8 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
if specNS == spec.UTSNamespace {
hostname := nsCtr.Hostname()
- g.SetHostname(hostname)
+ // Joining an existing namespace, cannot set the hostname
+ g.SetHostname("")
g.AddProcessEnv("HOSTNAME", hostname)
}
@@ -1171,6 +1173,15 @@ func (c *Container) makeBindMounts() error {
// finally, save it in the new container
c.state.BindMounts["/etc/hosts"] = hostsPath
}
+
+ if !hasCurrentUserMapped(c) {
+ if err := makeAccessible(resolvPath, c.RootUID(), c.RootGID()); err != nil {
+ return err
+ }
+ if err := makeAccessible(hostsPath, c.RootUID(), c.RootGID()); err != nil {
+ return err
+ }
+ }
} else {
if !c.config.UseImageResolvConf {
newResolv, err := c.generateResolvConf()
diff --git a/libpod/container_log.go b/libpod/container_log.go
index bfa303e84..c3a84d048 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -19,7 +19,7 @@ func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChan
return nil
}
-// ReadLog reads a containers log based on the input options and returns loglines over a channel
+// ReadLog reads a containers log based on the input options and returns loglines over a channel.
func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
// TODO Skip sending logs until journald logs can be read
// TODO make this not a magic string
@@ -61,7 +61,7 @@ func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *l
partial += nll.Msg
continue
} else if !nll.Partial() && len(partial) > 1 {
- nll.Msg = partial
+ nll.Msg = partial + nll.Msg
partial = ""
}
nll.CID = c.ID()
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 16df2a1cc..083553b7e 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -141,6 +141,9 @@ var (
// ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH)
// is out of date for the current podman version
ErrConmonOutdated = errors.New("outdated conmon version")
+ // ErrConmonDead indicates that the container's conmon process has been
+ // killed, preventing normal operation.
+ ErrConmonDead = errors.New("conmon process killed")
// ErrImageInUse indicates the requested operation failed because the image was in use
ErrImageInUse = errors.New("image is being used")
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 26fd2cab4..7f06e16fc 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -18,6 +18,9 @@ type InspectPodData struct {
Namespace string `json:"Namespace,omitempty"`
// Created is the time when the pod was created.
Created time.Time
+ // CreateCommand is the full command plus arguments of the process the
+ // pod has been created with.
+ CreateCommand []string `json:"CreateCommand,omitempty"`
// State represents the current state of the pod.
State string `json:"State"`
// Hostname is the hostname that the pod will set.
diff --git a/libpod/filters/pods.go b/libpod/filters/pods.go
index 9bf436eab..0edb9fbf2 100644
--- a/libpod/filters/pods.go
+++ b/libpod/filters/pods.go
@@ -57,13 +57,13 @@ func GeneratePodFilterFunc(filter, filterValue string) (
return nil, errors.Errorf("%s is not a valid status", filterValue)
}
return func(p *libpod.Pod) bool {
- ctr_statuses, err := p.Status()
+ ctrStatuses, err := p.Status()
if err != nil {
return false
}
- for _, ctr_status := range ctr_statuses {
- state := ctr_status.String()
- if ctr_status == define.ContainerStateConfigured {
+ for _, ctrStatus := range ctrStatuses {
+ state := ctrStatus.String()
+ if ctrStatus == define.ContainerStateConfigured {
state = "created"
}
if state == filterValue {
diff --git a/libpod/image/filters.go b/libpod/image/filters.go
index 747eba165..9d99fb344 100644
--- a/libpod/image/filters.go
+++ b/libpod/image/filters.go
@@ -102,8 +102,8 @@ func ReferenceFilter(ctx context.Context, referenceFilter string) ResultFilter {
}
}
-// IdFilter allows you to filter by image Id
-func IdFilter(idFilter string) ResultFilter {
+// IDFilter allows you to filter by image Id
+func IDFilter(idFilter string) ResultFilter {
return func(i *Image) bool {
return i.ID() == idFilter
}
@@ -172,7 +172,7 @@ func (ir *Runtime) createFilterFuncs(filters []string, img *Image) ([]ResultFilt
case "reference":
filterFuncs = append(filterFuncs, ReferenceFilter(ctx, splitFilter[1]))
case "id":
- filterFuncs = append(filterFuncs, IdFilter(splitFilter[1]))
+ filterFuncs = append(filterFuncs, IDFilter(splitFilter[1]))
default:
return nil, errors.Errorf("invalid filter %s ", splitFilter[0])
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 684a7ba42..c2f0041b1 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -107,6 +107,13 @@ type OCIRuntime interface {
// error.
CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error
+ // CheckConmonRunning verifies that the given container's Conmon
+ // instance is still running. Runtimes without Conmon, or systems where
+ // the PID of conmon is not available, should mock this as True.
+ // True indicates that Conmon for the instance is running, False
+ // indicates it is not.
+ CheckConmonRunning(ctr *Container) (bool, error)
+
// SupportsCheckpoint returns whether this OCI runtime
// implementation supports the CheckpointContainer() operation.
SupportsCheckpoint() bool
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 9c92b036e..0921a532b 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -669,6 +669,31 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...)
}
+func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
+ if ctr.state.ConmonPID == 0 {
+ // If the container is running or paused, assume Conmon is
+ // running. We didn't record Conmon PID on some old versions, so
+ // that is likely what's going on...
+ // Unusual enough that we should print a warning message though.
+ if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+ logrus.Warnf("Conmon PID is not set, but container is running!")
+ return true, nil
+ }
+ // Container's not running, so conmon PID being unset is
+ // expected. Conmon is not running.
+ return false, nil
+ }
+
+ // We have a conmon PID. Ping it with signal 0.
+ if err := unix.Kill(ctr.state.ConmonPID, 0); err != nil {
+ if err == unix.ESRCH {
+ return false, nil
+ }
+ return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID())
+ }
+ return true, nil
+}
+
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 4da16876c..90e90cc6c 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -163,6 +163,11 @@ func (r *MissingRuntime) CheckpointContainer(ctr *Container, options ContainerCh
return r.printError()
}
+// CheckConmonRunning is not available as the runtime is missing
+func (r *MissingRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
+ return false, r.printError()
+}
+
// SupportsCheckpoint returns false as checkpointing requires a working runtime
func (r *MissingRuntime) SupportsCheckpoint() bool {
return false
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 53567d2d0..8b40dad81 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -36,14 +36,30 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
var files []*os.File
notifySCTP := false
for _, i := range ports {
+ isV6 := net.ParseIP(i.HostIP).To4() == nil
+ if i.HostIP == "" {
+ isV6 = false
+ }
switch i.Protocol {
case "udp":
- addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ var (
+ addr *net.UDPAddr
+ err error
+ )
+ if isV6 {
+ addr, err = net.ResolveUDPAddr("udp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort))
+ } else {
+ addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ }
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve the UDP address")
}
- server, err := net.ListenUDP("udp", addr)
+ proto := "udp4"
+ if isV6 {
+ proto = "udp6"
+ }
+ server, err := net.ListenUDP(proto, addr)
if err != nil {
return nil, errors.Wrapf(err, "cannot listen on the UDP port")
}
@@ -54,12 +70,24 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
files = append(files, f)
case "tcp":
- addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ var (
+ addr *net.TCPAddr
+ err error
+ )
+ if isV6 {
+ addr, err = net.ResolveTCPAddr("tcp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort))
+ } else {
+ addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ }
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve the TCP address")
}
- server, err := net.ListenTCP("tcp4", addr)
+ proto := "tcp4"
+ if isV6 {
+ proto = "tcp6"
+ }
+ server, err := net.ListenTCP(proto, addr)
if err != nil {
return nil, errors.Wrapf(err, "cannot listen on the TCP port")
}
diff --git a/libpod/options.go b/libpod/options.go
index 8e0d3df86..5a0f60093 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1538,6 +1538,30 @@ func WithPodHostname(hostname string) PodCreateOption {
}
}
+// WithPodCreateCommand adds the full command plus arguments of the current
+// process to the pod config.
+func WithPodCreateCommand() PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+ pod.config.CreateCommand = os.Args
+ return nil
+ }
+}
+
+// WithInfraConmonPidFile sets the path to a custom conmon PID file for the
+// infra container.
+func WithInfraConmonPidFile(path string) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+ pod.config.InfraContainer.ConmonPidFile = path
+ return nil
+ }
+}
+
// WithPodLabels sets the labels of a pod.
func WithPodLabels(labels map[string]string) PodCreateOption {
return func(pod *Pod) error {
diff --git a/libpod/pod.go b/libpod/pod.go
index 8afaa6052..bf0d7a397 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -64,6 +64,10 @@ type PodConfig struct {
// Time pod was created
CreatedTime time.Time `json:"created"`
+ // CreateCommand is the full command plus arguments of the process the
+ // pod has been created with.
+ CreateCommand []string `json:"CreateCommand,omitempty"`
+
// ID of the pod's lock
LockID uint32 `json:"lockID"`
}
@@ -79,6 +83,7 @@ type podState struct {
// InfraContainerConfig is the configuration for the pod's infra container
type InfraContainerConfig struct {
+ ConmonPidFile string `json:"conmonPidFile"`
HasInfraContainer bool `json:"makeInfraContainer"`
HostNetwork bool `json:"infraHostNetwork,omitempty"`
PortBindings []ocicni.PortMapping `json:"infraPortBindings"`
@@ -124,6 +129,12 @@ func (p *Pod) CreatedTime() time.Time {
return p.config.CreatedTime
}
+// CreateCommand returns the os.Args of the process with which the pod has been
+// created.
+func (p *Pod) CreateCommand() []string {
+ return p.config.CreateCommand
+}
+
// CgroupParent returns the pod's CGroup parent
func (p *Pod) CgroupParent() string {
return p.config.CgroupParent
@@ -246,6 +257,20 @@ func (p *Pod) InfraContainerID() (string, error) {
return p.state.InfraContainerID, nil
}
+// InfraContainer returns the infra container.
+func (p *Pod) InfraContainer() (*Container, error) {
+ if !p.HasInfraContainer() {
+ return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ }
+
+ id, err := p.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+
+ return p.runtime.state.Container(id)
+}
+
// TODO add pod batching
// Lock pod to avoid lock contention
// Store and lock all containers (no RemoveContainer in batch guarantees cache will not become stale)
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index e2c4b515d..c8605eb69 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -489,6 +489,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
Name: p.Name(),
Namespace: p.Namespace(),
Created: p.CreatedTime(),
+ CreateCommand: p.config.CreateCommand,
State: podState,
Hostname: p.config.Hostname,
Labels: p.Labels(),
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 655b42e51..aa91dff03 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -464,9 +464,11 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
- // Check that the container's in a good state to be removed
+ // Check that the container's in a good state to be removed.
if c.state.State == define.ContainerStateRunning {
- if err := c.stop(c.StopTimeout()); err != nil {
+ // Ignore ErrConmonDead - we couldn't retrieve the container's
+ // exit code properly, but it's still stopped.
+ if err := c.stop(c.StopTimeout()); err != nil && errors.Cause(err) != define.ErrConmonDead {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
}
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 06a7b3936..a0dee3aa1 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -130,6 +130,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm
options = append(options, WithRootFSFromImage(imgID, imgName, rawImageName))
options = append(options, WithName(containerName))
options = append(options, withIsInfra())
+ if len(p.config.InfraContainer.ConmonPidFile) > 0 {
+ options = append(options, WithConmonPidFile(p.config.InfraContainer.ConmonPidFile))
+ }
return r.newContainer(ctx, g.Config, options...)
}
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 1fd068ba6..8ce2180ab 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -348,7 +348,7 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
}
func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error) {
- imageId, imageName := l.Image()
+ imageID, imageName := l.Image()
var (
err error
@@ -378,7 +378,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error
ID: l.ID(),
Names: []string{fmt.Sprintf("/%s", l.Name())},
Image: imageName,
- ImageID: imageId,
+ ImageID: imageID,
Command: strings.Join(l.Command(), " "),
Created: l.CreatedTime().Unix(),
Ports: nil,
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index b64ed0036..ce9ff1b19 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -224,7 +224,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
Status string `json:"status"`
Progress string `json:"progress"`
ProgressDetail map[string]string `json:"progressDetail"`
- Id string `json:"id"`
+ Id string `json:"id"` //nolint
}{
Status: iid,
ProgressDetail: map[string]string{},
@@ -289,7 +289,7 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
Error string `json:"error"`
Progress string `json:"progress"`
ProgressDetail map[string]string `json:"progressDetail"`
- Id string `json:"id"`
+ Id string `json:"id"` //nolint
}{
Status: fmt.Sprintf("pulling image (%s) from %s", img.Tag, strings.Join(img.Names(), ", ")),
ProgressDetail: map[string]string{},
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index e9d8fd719..6cc766a38 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -59,10 +59,10 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
ForceRm bool `schema:"forcerm"`
Memory int64 `schema:"memory"`
MemSwap int64 `schema:"memswap"`
- CpuShares uint64 `schema:"cpushares"`
- CpuSetCpus string `schema:"cpusetcpus"`
- CpuPeriod uint64 `schema:"cpuperiod"`
- CpuQuota int64 `schema:"cpuquota"`
+ CpuShares uint64 `schema:"cpushares"` //nolint
+ CpuSetCpus string `schema:"cpusetcpus"` //nolint
+ CpuPeriod uint64 `schema:"cpuperiod"` //nolint
+ CpuQuota int64 `schema:"cpuquota"` //nolint
BuildArgs string `schema:"buildargs"`
ShmSize int `schema:"shmsize"`
Squash bool `schema:"squash"`
diff --git a/pkg/api/handlers/compat/info.go b/pkg/api/handlers/compat/info.go
index e9756a03f..d4a933c54 100644
--- a/pkg/api/handlers/compat/info.go
+++ b/pkg/api/handlers/compat/info.go
@@ -177,7 +177,7 @@ func getContainersState(r *libpod.Runtime) map[define.ContainerStatus]int {
if err != nil {
continue
}
- states[state] += 1
+ states[state]++
}
}
return states
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index c52ca093f..8734ba405 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -20,10 +20,6 @@ import (
"github.com/pkg/errors"
)
-type CompatInspectNetwork struct {
- types.NetworkResource
-}
-
func InspectNetwork(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
diff --git a/pkg/api/handlers/compat/ping.go b/pkg/api/handlers/compat/ping.go
index abee3d8e8..d275c4a02 100644
--- a/pkg/api/handlers/compat/ping.go
+++ b/pkg/api/handlers/compat/ping.go
@@ -14,13 +14,13 @@ import (
// Clients will use the Header availability to test which backend engine is in use.
// Note: Additionally handler supports GET and HEAD methods
func Ping(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("API-Version", utils.ApiVersion[utils.CompatTree][utils.CurrentApiVersion].String())
+ w.Header().Set("API-Version", utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion].String())
w.Header().Set("BuildKit-Version", "")
w.Header().Set("Docker-Experimental", "true")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Pragma", "no-cache")
- w.Header().Set("Libpod-API-Version", utils.ApiVersion[utils.LibpodTree][utils.CurrentApiVersion].String())
+ w.Header().Set("Libpod-API-Version", utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String())
w.Header().Set("Libpod-Buildha-Version", buildah.Version)
w.WriteHeader(http.StatusOK)
diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go
index bfc226bb8..3164b16b9 100644
--- a/pkg/api/handlers/compat/version.go
+++ b/pkg/api/handlers/compat/version.go
@@ -34,14 +34,14 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) {
Name: "Podman Engine",
Version: versionInfo.Version,
Details: map[string]string{
- "APIVersion": utils.ApiVersion[utils.LibpodTree][utils.CurrentApiVersion].String(),
+ "APIVersion": utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String(),
"Arch": goRuntime.GOARCH,
"BuildTime": time.Unix(versionInfo.Built, 0).Format(time.RFC3339),
"Experimental": "true",
"GitCommit": versionInfo.GitCommit,
"GoVersion": versionInfo.GoVersion,
"KernelVersion": infoData.Host.Kernel,
- "MinAPIVersion": utils.ApiVersion[utils.LibpodTree][utils.MinimalApiVersion].String(),
+ "MinAPIVersion": utils.APIVersion[utils.LibpodTree][utils.MinimalAPIVersion].String(),
"Os": goRuntime.GOOS,
},
}}
diff --git a/pkg/api/handlers/decoder.go b/pkg/api/handlers/decoder.go
index 03b86275d..e46cd8837 100644
--- a/pkg/api/handlers/decoder.go
+++ b/pkg/api/handlers/decoder.go
@@ -17,7 +17,7 @@ func NewAPIDecoder() *schema.Decoder {
d := schema.NewDecoder()
d.IgnoreUnknownKeys(true)
- d.RegisterConverter(map[string][]string{}, convertUrlValuesString)
+ d.RegisterConverter(map[string][]string{}, convertURLValuesString)
d.RegisterConverter(time.Time{}, convertTimeString)
var Signal syscall.Signal
@@ -35,12 +35,12 @@ func NewAPIDecoder() *schema.Decoder {
// panic(err)
// }
// payload = url.QueryEscape(payload)
-func convertUrlValuesString(query string) reflect.Value {
+func convertURLValuesString(query string) reflect.Value {
f := map[string][]string{}
err := json.Unmarshal([]byte(query), &f)
if err != nil {
- logrus.Infof("convertUrlValuesString: Failed to Unmarshal %s: %s", query, err.Error())
+ logrus.Infof("convertURLValuesString: Failed to Unmarshal %s: %s", query, err.Error())
}
return reflect.ValueOf(f)
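The converter above expects map-valued query parameters to arrive as a JSON object that has been URL-escaped, exactly as the comment over convertURLValuesString describes. A small client-side sketch of producing such a value (the parameter name "filters" is the common case but is an assumption here):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/url"
    )

    func main() {
        // Encode the filter map as JSON, then escape it for use in a query string.
        filters := map[string][]string{"label": {"app=web"}, "status": {"running"}}
        payload, err := json.Marshal(filters)
        if err != nil {
            panic(err)
        }

        params := url.Values{}
        params.Set("filters", string(payload)) // url.Values escapes on Encode()
        fmt.Println("?" + params.Encode())

        // Equivalent manual form, matching the comment above the converter.
        fmt.Println(url.QueryEscape(string(payload)))
    }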
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index c3f8d5d66..7d4d03144 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/gorilla/schema"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
func PodCreate(w http.ResponseWriter, r *http.Request) {
@@ -31,11 +32,11 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
}
pod, err := generate.MakePod(&psg, runtime)
if err != nil {
- http_code := http.StatusInternalServerError
+ httpCode := http.StatusInternalServerError
if errors.Cause(err) == define.ErrPodExists {
- http_code = http.StatusConflict
+ httpCode = http.StatusConflict
}
- utils.Error(w, "Something went wrong.", http_code, err)
+ utils.Error(w, "Something went wrong.", httpCode, err)
return
}
utils.WriteResponse(w, http.StatusCreated, handlers.IDResponse{ID: pod.ID()})
@@ -375,6 +376,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
sig, err := util.ParseSignal(signal)
if err != nil {
utils.InternalServerError(w, errors.Wrapf(err, "unable to parse signal value"))
+ return
}
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
@@ -382,6 +384,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
utils.PodNotFound(w, name, err)
return
}
+ logrus.Debugf("Killing pod %s with signal %d", pod.ID(), sig)
podStates, err := pod.Status()
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index c42ca407b..b5574b87b 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -46,7 +46,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(input.Label))
}
if len(input.Options) > 0 {
- parsedOptions, err := parse.ParseVolumeOptions(input.Options)
+ parsedOptions, err := parse.VolumeOptions(input.Options)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go
index a46b308b5..4bcac6e72 100644
--- a/pkg/api/handlers/utils/containers.go
+++ b/pkg/api/handlers/utils/containers.go
@@ -62,7 +62,7 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) (int32, error) {
func CreateContainer(ctx context.Context, w http.ResponseWriter, runtime *libpod.Runtime, cc *createconfig.CreateConfig) {
var pod *libpod.Pod
- ctr, err := createconfig.CreateContainerFromCreateConfig(runtime, cc, ctx, pod)
+ ctr, err := createconfig.CreateContainerFromCreateConfig(ctx, runtime, cc, pod)
if err != nil {
Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()"))
return
diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go
index 2f4a54b98..62fdc05dd 100644
--- a/pkg/api/handlers/utils/handler.go
+++ b/pkg/api/handlers/utils/handler.go
@@ -28,27 +28,27 @@ const (
// CompatTree supports Docker compatibility endpoints
CompatTree
- // CurrentApiVersion announces what is the current API level
- CurrentApiVersion = VersionLevel(iota)
- // MinimalApiVersion announces what is the oldest API level supported
- MinimalApiVersion
+ // CurrentAPIVersion announces what is the current API level
+ CurrentAPIVersion = VersionLevel(iota)
+ // MinimalAPIVersion announces what is the oldest API level supported
+ MinimalAPIVersion
)
var (
// See https://docs.docker.com/engine/api/v1.40/
// libpod compat handlers are expected to honor docker API versions
- // ApiVersion provides the current and minimal API versions for compat and libpod endpoint trees
+ // APIVersion provides the current and minimal API versions for compat and libpod endpoint trees
// Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow
// clients to shop for the Version they wish to support
- ApiVersion = map[VersionTree]map[VersionLevel]semver.Version{
+ APIVersion = map[VersionTree]map[VersionLevel]semver.Version{
LibpodTree: {
- CurrentApiVersion: semver.MustParse("1.0.0"),
- MinimalApiVersion: semver.MustParse("1.0.0"),
+ CurrentAPIVersion: semver.MustParse("1.0.0"),
+ MinimalAPIVersion: semver.MustParse("1.0.0"),
},
CompatTree: {
- CurrentApiVersion: semver.MustParse("1.40.0"),
- MinimalApiVersion: semver.MustParse("1.24.0"),
+ CurrentAPIVersion: semver.MustParse("1.40.0"),
+ MinimalAPIVersion: semver.MustParse("1.24.0"),
},
}
@@ -103,8 +103,8 @@ func SupportedVersionWithDefaults(r *http.Request) (semver.Version, error) {
}
return SupportedVersion(r,
- fmt.Sprintf(">=%s <=%s", ApiVersion[tree][MinimalApiVersion].String(),
- ApiVersion[tree][CurrentApiVersion].String()))
+ fmt.Sprintf(">=%s <=%s", APIVersion[tree][MinimalAPIVersion].String(),
+ APIVersion[tree][CurrentAPIVersion].String()))
}
// WriteResponse encodes the given value as JSON or string and renders it for http client
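SupportedVersionWithDefaults builds a semver range string from the minimal and current levels of the requested tree, so any client-supplied version inside that window is accepted. A small sketch of the same check in isolation, using the blang/semver package that the APIVersion map is built on (the import path is assumed to be the one this tree already vendors):

    package main

    import (
        "fmt"

        "github.com/blang/semver"
    )

    func main() {
        // Mirror of the compat-tree bounds shown above.
        minimal := semver.MustParse("1.24.0")
        current := semver.MustParse("1.40.0")

        // Same range expression the handler builds: ">=min <=current".
        inRange, err := semver.ParseRange(fmt.Sprintf(">=%s <=%s", minimal, current))
        if err != nil {
            panic(err)
        }

        for _, requested := range []string{"1.24.0", "1.40.0", "1.41.0"} {
            v := semver.MustParse(requested)
            fmt.Printf("v%s supported: %v\n", requested, inRange(v))
        }
    }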
diff --git a/pkg/api/handlers/utils/handler_test.go b/pkg/api/handlers/utils/handler_test.go
index 6009432b5..d9fd22b80 100644
--- a/pkg/api/handlers/utils/handler_test.go
+++ b/pkg/api/handlers/utils/handler_test.go
@@ -12,12 +12,12 @@ import (
func TestSupportedVersion(t *testing.T) {
req, err := http.NewRequest("GET",
- fmt.Sprintf("/v%s/libpod/testing/versions", ApiVersion[LibpodTree][CurrentApiVersion]),
+ fmt.Sprintf("/v%s/libpod/testing/versions", APIVersion[LibpodTree][CurrentAPIVersion]),
nil)
if err != nil {
t.Fatal(err)
}
- req = mux.SetURLVars(req, map[string]string{"version": ApiVersion[LibpodTree][CurrentApiVersion].String()})
+ req = mux.SetURLVars(req, map[string]string{"version": APIVersion[LibpodTree][CurrentAPIVersion].String()})
rr := httptest.NewRecorder()
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/utils/pods.go b/pkg/api/handlers/utils/pods.go
index 5b6f6d34d..4a5cbd05c 100644
--- a/pkg/api/handlers/utils/pods.go
+++ b/pkg/api/handlers/utils/pods.go
@@ -54,7 +54,7 @@ func GetPods(w http.ResponseWriter, r *http.Request) ([]*entities.ListPodsReport
if err != nil {
return nil, err
}
- infraId, err := pod.InfraContainerID()
+ infraID, err := pod.InfraContainerID()
if err != nil {
return nil, err
}
@@ -65,7 +65,7 @@ func GetPods(w http.ResponseWriter, r *http.Request) ([]*entities.ListPodsReport
Name: pod.Name(),
Namespace: pod.Namespace(),
Status: status,
- InfraId: infraId,
+ InfraId: infraID,
Labels: pod.Labels(),
}
for _, ctr := range ctrs {
diff --git a/pkg/api/server/docs.go b/pkg/api/server/docs.go
index c989c7927..124c16092 100644
--- a/pkg/api/server/docs.go
+++ b/pkg/api/server/docs.go
@@ -4,6 +4,31 @@
// only as experimental at this point. The endpoints, parameters, inputs, and
// return values can all change.
//
+// To start the service and keep it running for 5,000 seconds (-t 0 runs forever):
+//
+// podman system service -t 5000 &
+//
+// You can then use cURL on the socket using requests documented below.
+//
+// NOTE: if you install the package podman-docker, it will create a symbolic
+// link for /var/run/docker.sock to /run/podman/podman.sock
+//
+// See podman-service(1) for more information.
+//
+// Quick Examples:
+//
+// 'podman info'
+//
+// curl --unix-socket /run/podman/podman.sock http://d/v1.0.0/libpod/info
+//
+// 'podman pull quay.io/containers/podman'
+//
+// curl -XPOST --unix-socket /run/podman/podman.sock -v 'http://d/v1.0.0/images/create?fromImage=quay.io%2Fcontainers%2Fpodman'
+//
+// 'podman list images'
+//
+// curl --unix-socket /run/podman/podman.sock -v 'http://d/v1.0.0/libpod/images/json' | jq
+//
// Terms Of Service:
//
// Schemes: http, https
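The curl examples added above translate directly to any HTTP client that can dial the unix socket; here is a minimal Go sketch of the 'podman info' request, with the socket path and version prefix copied from those examples:

    package main

    import (
        "context"
        "fmt"
        "io/ioutil"
        "net"
        "net/http"
    )

    func main() {
        client := &http.Client{
            Transport: &http.Transport{
                // Ignore the URL's host and always dial the podman socket.
                DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                    return (&net.Dialer{}).DialContext(ctx, "unix", "/run/podman/podman.sock")
                },
            },
        }

        resp, err := client.Get("http://d/v1.0.0/libpod/info")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body)) // raw JSON, same output the curl example shows
    }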
diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go
index 7a7db12f3..dbdb7f17b 100644
--- a/pkg/api/server/handler_api.go
+++ b/pkg/api/server/handler_api.go
@@ -34,9 +34,9 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc {
}
// TODO: Use r.ConnContext when ported to go 1.13
- c := context.WithValue(r.Context(), "decoder", s.Decoder)
- c = context.WithValue(c, "runtime", s.Runtime)
- c = context.WithValue(c, "shutdownFunc", s.Shutdown)
+ c := context.WithValue(r.Context(), "decoder", s.Decoder) //nolint
+ c = context.WithValue(c, "runtime", s.Runtime) //nolint
+ c = context.WithValue(c, "shutdownFunc", s.Shutdown) //nolint
r = r.WithContext(c)
h(w, r)
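The wrapper above stashes the decoder, runtime, and shutdown callback in the request context so every handler can pull them back out by key, which is the pattern the handlers in this diff already use (for example r.Context().Value("runtime").(*libpod.Runtime)). A generic sketch of that store-and-retrieve round trip with plain string keys; the runtimeStub type is purely illustrative, and the //nolint markers exist because string context keys trip the linter:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    type runtimeStub struct{ name string }

    func main() {
        // Handler that retrieves a dependency placed on the context by a wrapper.
        handler := func(w http.ResponseWriter, r *http.Request) {
            rt := r.Context().Value("runtime").(*runtimeStub)
            fmt.Fprintf(w, "runtime=%s", rt.name)
        }

        // Wrapper equivalent to APIHandler: attach values, then call the handler.
        wrapped := func(w http.ResponseWriter, r *http.Request) {
            c := context.WithValue(r.Context(), "runtime", &runtimeStub{name: "libpod"}) //nolint
            handler(w, r.WithContext(c))
        }

        rec := httptest.NewRecorder()
        wrapped(rec, httptest.NewRequest(http.MethodGet, "/", nil))
        fmt.Println(rec.Body.String()) // runtime=libpod
    }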
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 499a4c58a..bd6a99b96 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -257,7 +257,7 @@ func (t *IdleTracker) ConnState(conn net.Conn, state http.ConnState) {
if oldActive == 0 {
t.timer.Stop()
}
- t.total += 1
+ t.total++
case http.StateIdle, http.StateClosed:
delete(t.active, conn)
// Restart the timer if we've become idle
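IdleTracker counts connections as they move through http.ConnState transitions: new or active connections stop the shutdown timer, idle or closed connections are dropped from the active set so the timer can be re-armed. A reduced sketch of that idea wired into http.Server's ConnState hook; this is an assumption about the tracker's internals, not a copy of them:

    package main

    import (
        "net"
        "net/http"
        "sync"
        "time"
    )

    // idleTracker is a simplified stand-in for the server's IdleTracker:
    // it arms a timer whenever no connections are active.
    type idleTracker struct {
        mu     sync.Mutex
        active map[net.Conn]struct{}
        timer  *time.Timer
    }

    func (t *idleTracker) ConnState(conn net.Conn, state http.ConnState) {
        t.mu.Lock()
        defer t.mu.Unlock()
        switch state {
        case http.StateNew, http.StateActive:
            if len(t.active) == 0 {
                t.timer.Stop() // work arrived, cancel the idle shutdown
            }
            t.active[conn] = struct{}{}
        case http.StateIdle, http.StateClosed:
            delete(t.active, conn)
            if len(t.active) == 0 {
                t.timer.Reset(5 * time.Second) // idle again, re-arm the timer
            }
        }
    }

    func main() {
        tracker := &idleTracker{
            active: make(map[net.Conn]struct{}),
            timer:  time.NewTimer(5 * time.Second),
        }
        srv := &http.Server{Addr: "127.0.0.1:0", ConnState: tracker.ConnState}
        _ = srv // ListenAndServe omitted; the ConnState hook is the point of the sketch
    }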
diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go
index da47ea713..94f7a45d0 100644
--- a/pkg/bindings/bindings.go
+++ b/pkg/bindings/bindings.go
@@ -5,7 +5,6 @@
// This package exposes a series of methods that allow users to firstly
// create their connection with the API endpoints. Once the connection
// is established, users can then manage the Podman container runtime.
-
package bindings
import (
@@ -28,7 +27,7 @@ var (
pFalse = false
PFalse = &pFalse
- // _*YES*- podman will fail to run if this value is wrong
+ // APIVersion - podman will fail to run if this value is wrong
APIVersion = semver.MustParse("1.0.0")
)
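The bindings package is used by first establishing a connection, which returns a context carrying the client, and then passing that context to the per-object packages. A hedged usage sketch: the socket URI is an assumption, NewConnection is assumed to keep this two-argument form, and Diff has the signature shown later in this patch:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/bindings"
        "github.com/containers/libpod/pkg/bindings/containers"
    )

    func main() {
        // Establish the connection first; the returned context carries the client
        // and is handed to every subsequent bindings call.
        connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
        if err != nil {
            panic(err)
        }

        // Diff(ctx, nameOrID) lists filesystem changes for the named container.
        changes, err := containers.Diff(connCtx, "mycontainer")
        if err != nil {
            panic(err)
        }
        for _, change := range changes {
            fmt.Println(change.Kind, change.Path)
        }
    }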
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index b130b9598..aa7f3707c 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -41,7 +41,7 @@ type APIResponse struct {
}
type Connection struct {
- Uri *url.URL
+ URI *url.URL
Client *http.Client
}
@@ -137,7 +137,7 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, passPhrase strin
func tcpClient(_url *url.URL) (Connection, error) {
connection := Connection{
- Uri: _url,
+ URI: _url,
}
connection.Client = &http.Client{
Transport: &http.Transport{
@@ -246,7 +246,7 @@ func sshClient(_url *url.URL, secure bool, passPhrase string, identities ...stri
return Connection{}, errors.Wrapf(err, "Connection to bastion host (%s) failed.", _url.String())
}
- connection := Connection{Uri: _url}
+ connection := Connection{URI: _url}
connection.Client = &http.Client{
Transport: &http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
@@ -257,7 +257,7 @@ func sshClient(_url *url.URL, secure bool, passPhrase string, identities ...stri
}
func unixClient(_url *url.URL) (Connection, error) {
- connection := Connection{Uri: _url}
+ connection := Connection{URI: _url}
connection.Client = &http.Client{
Transport: &http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index b7f35c30d..44c7f4002 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -25,7 +25,7 @@ import (
)
// Attach attaches to a running container
-func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stream *bool, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool) error {
+func Attach(ctx context.Context, nameOrID string, detachKeys *string, logs, stream *bool, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool) error {
isSet := struct {
stdin bool
stdout bool
@@ -52,7 +52,7 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
}
// Do we need to wire in stdin?
- ctnr, err := Inspect(ctx, nameOrId, bindings.PFalse)
+ ctnr, err := Inspect(ctx, nameOrID, bindings.PFalse)
if err != nil {
return err
}
@@ -115,7 +115,7 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
IdleConnTimeout: time.Duration(0),
}
conn.Client.Transport = t
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/attach", params, headers, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/attach", params, headers, nameOrID)
if err != nil {
return err
}
@@ -129,7 +129,7 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
winCtx, winCancel := context.WithCancel(ctx)
defer winCancel()
- go attachHandleResize(ctx, winCtx, winChange, false, nameOrId, file)
+ go attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file)
}
// If we are attaching around a start, we need to "signal"
@@ -243,13 +243,13 @@ func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error
}
// ResizeContainerTTY sets container's TTY height and width in characters
-func ResizeContainerTTY(ctx context.Context, nameOrId string, height *int, width *int) error {
- return resizeTTY(ctx, bindings.JoinURL("containers", nameOrId, "resize"), height, width)
+func ResizeContainerTTY(ctx context.Context, nameOrID string, height *int, width *int) error {
+ return resizeTTY(ctx, bindings.JoinURL("containers", nameOrID, "resize"), height, width)
}
// ResizeExecTTY sets session's TTY height and width in characters
-func ResizeExecTTY(ctx context.Context, nameOrId string, height *int, width *int) error {
- return resizeTTY(ctx, bindings.JoinURL("exec", nameOrId, "resize"), height, width)
+func ResizeExecTTY(ctx context.Context, nameOrID string, height *int, width *int) error {
+ return resizeTTY(ctx, bindings.JoinURL("exec", nameOrID, "resize"), height, width)
}
// resizeTTY sets the size of the TTY of a container
diff --git a/pkg/bindings/containers/checkpoint.go b/pkg/bindings/containers/checkpoint.go
index f483a9297..916ec8071 100644
--- a/pkg/bindings/containers/checkpoint.go
+++ b/pkg/bindings/containers/checkpoint.go
@@ -10,9 +10,9 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-// Checkpoint checkpoints the given container (identified by nameOrId). All additional
+// Checkpoint checkpoints the given container (identified by nameOrID). All additional
// options are optional and allow for more fine-grained control of the checkpoint process.
-func Checkpoint(ctx context.Context, nameOrId string, keep, leaveRunning, tcpEstablished, ignoreRootFS *bool, export *string) (*entities.CheckpointReport, error) {
+func Checkpoint(ctx context.Context, nameOrID string, keep, leaveRunning, tcpEstablished, ignoreRootFS *bool, export *string) (*entities.CheckpointReport, error) {
var report entities.CheckpointReport
conn, err := bindings.GetClient(ctx)
if err != nil {
@@ -34,16 +34,16 @@ func Checkpoint(ctx context.Context, nameOrId string, keep, leaveRunning, tcpEst
if export != nil {
params.Set("export", *export)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/checkpoint", params, nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/checkpoint", params, nil, nameOrID)
if err != nil {
return nil, err
}
return &report, response.Process(&report)
}
-// Restore restores a checkpointed container to running. The container is identified by the nameOrId option. All
+// Restore restores a checkpointed container to running. The container is identified by the nameOrID option. All
// additional options are optional and allow finer control of the restore process.
-func Restore(ctx context.Context, nameOrId string, keep, tcpEstablished, ignoreRootFS, ignoreStaticIP, ignoreStaticMAC *bool, name, importArchive *string) (*entities.RestoreReport, error) {
+func Restore(ctx context.Context, nameOrID string, keep, tcpEstablished, ignoreRootFS, ignoreStaticIP, ignoreStaticMAC *bool, name, importArchive *string) (*entities.RestoreReport, error) {
var report entities.RestoreReport
conn, err := bindings.GetClient(ctx)
if err != nil {
@@ -71,7 +71,7 @@ func Restore(ctx context.Context, nameOrId string, keep, tcpEstablished, ignoreR
if importArchive != nil {
params.Set("import", *importArchive)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restore", params, nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restore", params, nil, nameOrID)
if err != nil {
return nil, err
}
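Checkpoint and Restore take their tuning knobs as optional pointers, so a caller only sets the ones it cares about. A sketch of exporting a checkpoint to a tarball and restoring it under a new name, using the signatures exactly as declared above; the container name, file path, and connection setup are illustrative assumptions:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/bindings"
        "github.com/containers/libpod/pkg/bindings/containers"
    )

    func main() {
        connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
        if err != nil {
            panic(err)
        }

        keep := true
        export := "/tmp/web-checkpoint.tar.gz"

        // Checkpoint(ctx, nameOrID, keep, leaveRunning, tcpEstablished, ignoreRootFS *bool, export *string)
        cpReport, err := containers.Checkpoint(connCtx, "web", &keep, nil, nil, nil, &export)
        if err != nil {
            panic(err)
        }
        fmt.Println("checkpointed:", cpReport.Id)

        newName := "web-restored"
        // Restore(ctx, nameOrID, keep, tcpEstablished, ignoreRootFS, ignoreStaticIP, ignoreStaticMAC *bool, name, importArchive *string)
        rsReport, err := containers.Restore(connCtx, "web", &keep, nil, nil, nil, nil, &newName, &export)
        if err != nil {
            panic(err)
        }
        fmt.Println("restored:", rsReport.Id)
    }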
diff --git a/pkg/bindings/containers/commit.go b/pkg/bindings/containers/commit.go
index 780d42272..1a9ddc970 100644
--- a/pkg/bindings/containers/commit.go
+++ b/pkg/bindings/containers/commit.go
@@ -10,16 +10,16 @@ import (
"github.com/containers/libpod/pkg/bindings"
)
-// Commit creates a container image from a container. The container is defined by nameOrId. Use
+// Commit creates a container image from a container. The container is defined by nameOrID. Use
// the CommitOptions for finer grain control on characteristics of the resulting image.
-func Commit(ctx context.Context, nameOrId string, options CommitOptions) (handlers.IDResponse, error) {
+func Commit(ctx context.Context, nameOrID string, options CommitOptions) (handlers.IDResponse, error) {
id := handlers.IDResponse{}
conn, err := bindings.GetClient(ctx)
if err != nil {
return id, err
}
params := url.Values{}
- params.Set("container", nameOrId)
+ params.Set("container", nameOrID)
if options.Author != nil {
params.Set("author", *options.Author)
}
diff --git a/pkg/bindings/containers/diff.go b/pkg/bindings/containers/diff.go
index 06a828c30..e7a50248a 100644
--- a/pkg/bindings/containers/diff.go
+++ b/pkg/bindings/containers/diff.go
@@ -9,13 +9,13 @@ import (
)
// Diff provides the changes between two container layers
-func Diff(ctx context.Context, nameOrId string) ([]archive.Change, error) {
+func Diff(ctx context.Context, nameOrID string) ([]archive.Change, error) {
conn, err := bindings.GetClient(ctx)
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/changes", nil, nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/changes", nil, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/generate/generate.go b/pkg/bindings/generate/generate.go
index 161b722f3..5e4be4896 100644
--- a/pkg/bindings/generate/generate.go
+++ b/pkg/bindings/generate/generate.go
@@ -10,7 +10,7 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
+func Kube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
conn, err := bindings.GetClient(ctx)
if err != nil {
return nil, err
diff --git a/pkg/bindings/images/diff.go b/pkg/bindings/images/diff.go
index e2d344ea0..25cbde188 100644
--- a/pkg/bindings/images/diff.go
+++ b/pkg/bindings/images/diff.go
@@ -9,13 +9,13 @@ import (
)
// Diff provides the changes between two container layers
-func Diff(ctx context.Context, nameOrId string) ([]archive.Change, error) {
+func Diff(ctx context.Context, nameOrID string) ([]archive.Change, error) {
conn, err := bindings.GetClient(ctx)
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/changes", nil, nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/changes", nil, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index e0802a6e1..a82a9080b 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -80,7 +80,7 @@ func GetImage(ctx context.Context, nameOrID string, size *bool) (*entities.Image
}
// Tree retrieves a "tree" based representation of the given image
-func Tree(ctx context.Context, nameOrId string, whatRequires *bool) (*entities.ImageTreeReport, error) {
+func Tree(ctx context.Context, nameOrID string, whatRequires *bool) (*entities.ImageTreeReport, error) {
var report entities.ImageTreeReport
conn, err := bindings.GetClient(ctx)
if err != nil {
@@ -90,7 +90,7 @@ func Tree(ctx context.Context, nameOrId string, whatRequires *bool) (*entities.I
if whatRequires != nil {
params.Set("size", strconv.FormatBool(*whatRequires))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/tree", params, nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/tree", params, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/play/play.go b/pkg/bindings/play/play.go
index 288cca454..9a4f56b6d 100644
--- a/pkg/bindings/play/play.go
+++ b/pkg/bindings/play/play.go
@@ -13,7 +13,7 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+func Kube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
var report entities.PlayKubeReport
conn, err := bindings.GetClient(ctx)
if err != nil {
diff --git a/pkg/domain/entities/container_ps.go b/pkg/domain/entities/container_ps.go
index fd94d93be..c5e11f188 100644
--- a/pkg/domain/entities/container_ps.go
+++ b/pkg/domain/entities/container_ps.go
@@ -85,9 +85,9 @@ func (a psSortedCommand) Less(i, j int) bool {
return strings.Join(a.SortListContainers[i].Command, " ") < strings.Join(a.SortListContainers[j].Command, " ")
}
-type psSortedId struct{ SortListContainers }
+type psSortedID struct{ SortListContainers }
-func (a psSortedId) Less(i, j int) bool {
+func (a psSortedID) Less(i, j int) bool {
return a.SortListContainers[i].ID < a.SortListContainers[j].ID
}
@@ -139,7 +139,7 @@ func (a PsSortedCreateTime) Less(i, j int) bool {
func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainers, error) {
switch sortBy {
case "id":
- sort.Sort(psSortedId{psOutput})
+ sort.Sort(psSortedID{psOutput})
case "image":
sort.Sort(psSortedImage{psOutput})
case "command":
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 8d85a9b23..b4d8e6c29 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -56,7 +56,7 @@ type WaitOptions struct {
}
type WaitReport struct {
- Id string
+ Id string //nolint
Error error
ExitCode int32
}
@@ -76,7 +76,7 @@ type PauseUnPauseOptions struct {
type PauseUnpauseReport struct {
Err error
- Id string
+ Id string //nolint
}
type StopOptions struct {
@@ -84,12 +84,12 @@ type StopOptions struct {
CIDFiles []string
Ignore bool
Latest bool
- Timeout uint
+ Timeout *uint
}
type StopReport struct {
Err error
- Id string
+ Id string //nolint
}
type TopOptions struct {
@@ -110,7 +110,7 @@ type KillOptions struct {
type KillReport struct {
Err error
- Id string
+ Id string //nolint
}
type RestartOptions struct {
@@ -122,7 +122,7 @@ type RestartOptions struct {
type RestartReport struct {
Err error
- Id string
+ Id string //nolint
}
type RmOptions struct {
@@ -137,7 +137,7 @@ type RmOptions struct {
type RmReport struct {
Err error
- Id string
+ Id string //nolint
}
type ContainerInspectReport struct {
@@ -157,7 +157,7 @@ type CommitOptions struct {
}
type CommitReport struct {
- Id string
+ Id string //nolint
}
type ContainerExportOptions struct {
@@ -176,7 +176,7 @@ type CheckpointOptions struct {
type CheckpointReport struct {
Err error
- Id string
+ Id string //nolint
}
type RestoreOptions struct {
@@ -193,11 +193,11 @@ type RestoreOptions struct {
type RestoreReport struct {
Err error
- Id string
+ Id string //nolint
}
type ContainerCreateReport struct {
- Id string
+ Id string //nolint
}
// AttachOptions describes the cli and other values
@@ -263,7 +263,7 @@ type ContainerStartOptions struct {
// ContainerStartReport describes the response from starting
// containers from the cli
type ContainerStartReport struct {
- Id string
+ Id string //nolint
RawInput string
Err error
ExitCode int
@@ -303,7 +303,7 @@ type ContainerRunOptions struct {
// a container
type ContainerRunReport struct {
ExitCode int
- Id string
+ Id string //nolint
}
// ContainerCleanupOptions are the CLI values for the
@@ -320,7 +320,7 @@ type ContainerCleanupOptions struct {
// container cleanup
type ContainerCleanupReport struct {
CleanErr error
- Id string
+ Id string //nolint
RmErr error
RmiErr error
}
@@ -336,7 +336,7 @@ type ContainerInitOptions struct {
// container init
type ContainerInitReport struct {
Err error
- Id string
+ Id string //nolint
}
// ContainerMountOptions describes the input values for mounting containers
@@ -358,7 +358,7 @@ type ContainerUnmountOptions struct {
// ContainerMountReport describes the response from container mount
type ContainerMountReport struct {
Err error
- Id string
+ Id string //nolint
Name string
Path string
}
@@ -366,7 +366,7 @@ type ContainerMountReport struct {
// ContainerUnmountReport describes the response from umounting a container
type ContainerUnmountReport struct {
Err error
- Id string
+ Id string //nolint
}
// ContainerPruneOptions describes the options needed
@@ -392,7 +392,7 @@ type ContainerPortOptions struct {
// ContainerPortReport describes the output needed for
// the CLI to output ports
type ContainerPortReport struct {
- Id string
+ Id string //nolint
Ports []ocicni.PortMapping
}
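StopOptions.Timeout becoming a *uint lets callers distinguish "use the container's default stop timeout" (nil) from an explicit value, which is exactly what the rewritten ContainerStop later in this patch checks for. A small sketch of how a CLI layer might populate it only when the flag was actually set; the helper and flag plumbing here are illustrative, not the real cmd/podman code:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/pkg/domain/entities"
    )

    // buildStopOptions leaves Timeout nil unless the user passed --time,
    // so the engine can fall back to each container's own stop timeout.
    func buildStopOptions(timeoutFlagSet bool, timeoutValue uint) entities.StopOptions {
        opts := entities.StopOptions{}
        if timeoutFlagSet {
            opts.Timeout = &timeoutValue
        }
        return opts
    }

    func main() {
        withDefault := buildStopOptions(false, 0)
        withExplicit := buildStopOptions(true, 3)
        fmt.Println("default timeout? ", withDefault.Timeout == nil) // true
        fmt.Println("explicit timeout:", *withExplicit.Timeout)      // 3
    }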
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
index b2bef0eea..1f056bad7 100644
--- a/pkg/domain/entities/engine.go
+++ b/pkg/domain/entities/engine.go
@@ -39,7 +39,7 @@ type PodmanConfig struct {
CGroupUsage string // rootless code determines Usage message
ConmonPath string // --conmon flag will set Engine.ConmonPath
- CpuProfile string // Hidden: Should CPU profile be taken
+ CPUProfile string // Hidden: Should CPU profile be taken
EngineMode EngineMode // ABI or Tunneling mode
Identities []string // ssh identities for connecting to server
MaxWorks int // maximum number of parallel threads
@@ -52,7 +52,7 @@ type PodmanConfig struct {
SpanCtx context.Context // context to use when tracing
Syslog bool // write to StdOut and Syslog, not supported when tunneling
Trace bool // Hidden: Trace execution
- Uri string // URI to RESTful API Service
+ URI string // URI to RESTful API Service
Runroot string
StorageDriver string
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 3d5161745..979df7581 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -12,25 +12,25 @@ import (
type ContainerEngine interface {
AutoUpdate(ctx context.Context, options AutoUpdateOptions) (*AutoUpdateReport, []error)
Config(ctx context.Context) (*config.Config, error)
- ContainerAttach(ctx context.Context, nameOrId string, options AttachOptions) error
+ ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error
ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)
ContainerCleanup(ctx context.Context, namesOrIds []string, options ContainerCleanupOptions) ([]*ContainerCleanupReport, error)
- ContainerCommit(ctx context.Context, nameOrId string, options CommitOptions) (*CommitReport, error)
+ ContainerCommit(ctx context.Context, nameOrID string, options CommitOptions) (*CommitReport, error)
ContainerCp(ctx context.Context, source, dest string, options ContainerCpOptions) (*ContainerCpReport, error)
ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*ContainerCreateReport, error)
- ContainerDiff(ctx context.Context, nameOrId string, options DiffOptions) (*DiffReport, error)
- ContainerExec(ctx context.Context, nameOrId string, options ExecOptions, streams define.AttachStreams) (int, error)
+ ContainerDiff(ctx context.Context, nameOrID string, options DiffOptions) (*DiffReport, error)
+ ContainerExec(ctx context.Context, nameOrID string, options ExecOptions, streams define.AttachStreams) (int, error)
ContainerExecDetached(ctx context.Context, nameOrID string, options ExecOptions) (string, error)
- ContainerExists(ctx context.Context, nameOrId string) (*BoolReport, error)
- ContainerExport(ctx context.Context, nameOrId string, options ContainerExportOptions) error
+ ContainerExists(ctx context.Context, nameOrID string) (*BoolReport, error)
+ ContainerExport(ctx context.Context, nameOrID string, options ContainerExportOptions) error
ContainerInit(ctx context.Context, namesOrIds []string, options ContainerInitOptions) ([]*ContainerInitReport, error)
ContainerInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]*ContainerInspectReport, error)
ContainerKill(ctx context.Context, namesOrIds []string, options KillOptions) ([]*KillReport, error)
ContainerList(ctx context.Context, options ContainerListOptions) ([]ListContainer, error)
ContainerLogs(ctx context.Context, containers []string, options ContainerLogsOptions) error
- ContainerMount(ctx context.Context, nameOrIds []string, options ContainerMountOptions) ([]*ContainerMountReport, error)
+ ContainerMount(ctx context.Context, nameOrIDs []string, options ContainerMountOptions) ([]*ContainerMountReport, error)
ContainerPause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error)
- ContainerPort(ctx context.Context, nameOrId string, options ContainerPortOptions) ([]*ContainerPortReport, error)
+ ContainerPort(ctx context.Context, nameOrID string, options ContainerPortOptions) ([]*ContainerPortReport, error)
ContainerPrune(ctx context.Context, options ContainerPruneOptions) (*ContainerPruneReport, error)
ContainerRestart(ctx context.Context, namesOrIds []string, options RestartOptions) ([]*RestartReport, error)
ContainerRestore(ctx context.Context, namesOrIds []string, options RestoreOptions) ([]*RestoreReport, error)
@@ -41,14 +41,14 @@ type ContainerEngine interface {
ContainerStats(ctx context.Context, namesOrIds []string, options ContainerStatsOptions) error
ContainerStop(ctx context.Context, namesOrIds []string, options StopOptions) ([]*StopReport, error)
ContainerTop(ctx context.Context, options TopOptions) (*StringSliceReport, error)
- ContainerUnmount(ctx context.Context, nameOrIds []string, options ContainerUnmountOptions) ([]*ContainerUnmountReport, error)
+ ContainerUnmount(ctx context.Context, nameOrIDs []string, options ContainerUnmountOptions) ([]*ContainerUnmountReport, error)
ContainerUnpause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error)
ContainerWait(ctx context.Context, namesOrIds []string, options WaitOptions) ([]WaitReport, error)
Events(ctx context.Context, opts EventsOptions) error
GenerateSystemd(ctx context.Context, nameOrID string, opts GenerateSystemdOptions) (*GenerateSystemdReport, error)
GenerateKube(ctx context.Context, nameOrID string, opts GenerateKubeOptions) (*GenerateKubeReport, error)
SystemPrune(ctx context.Context, options SystemPruneOptions) (*SystemPruneReport, error)
- HealthCheckRun(ctx context.Context, nameOrId string, options HealthCheckOptions) (*define.HealthCheckResults, error)
+ HealthCheckRun(ctx context.Context, nameOrID string, options HealthCheckOptions) (*define.HealthCheckResults, error)
Info(ctx context.Context) (*define.Info, error)
NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (*NetworkCreateReport, error)
NetworkInspect(ctx context.Context, namesOrIds []string, options NetworkInspectOptions) ([]NetworkInspectReport, error)
@@ -56,7 +56,7 @@ type ContainerEngine interface {
NetworkRm(ctx context.Context, namesOrIds []string, options NetworkRmOptions) ([]*NetworkRmReport, error)
PlayKube(ctx context.Context, path string, opts PlayKubeOptions) (*PlayKubeReport, error)
PodCreate(ctx context.Context, opts PodCreateOptions) (*PodCreateReport, error)
- PodExists(ctx context.Context, nameOrId string) (*BoolReport, error)
+ PodExists(ctx context.Context, nameOrID string) (*BoolReport, error)
PodInspect(ctx context.Context, options PodInspectOptions) (*PodInspectReport, error)
PodKill(ctx context.Context, namesOrIds []string, options PodKillOptions) ([]*PodKillReport, error)
PodPause(ctx context.Context, namesOrIds []string, options PodPauseOptions) ([]*PodPauseReport, error)
@@ -75,7 +75,7 @@ type ContainerEngine interface {
Unshare(ctx context.Context, args []string) error
VarlinkService(ctx context.Context, opts ServiceOptions) error
Version(ctx context.Context) (*SystemVersionReport, error)
- VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IdOrNameResponse, error)
+ VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IDOrNameResponse, error)
VolumeInspect(ctx context.Context, namesOrIds []string, opts VolumeInspectOptions) ([]*VolumeInspectReport, error)
VolumeList(ctx context.Context, opts VolumeListOptions) ([]*VolumeListReport, error)
VolumePrune(ctx context.Context, opts VolumePruneOptions) ([]*VolumePruneReport, error)
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
index 7d7099838..60fb20b6e 100644
--- a/pkg/domain/entities/engine_image.go
+++ b/pkg/domain/entities/engine_image.go
@@ -9,9 +9,9 @@ import (
type ImageEngine interface {
Build(ctx context.Context, containerFiles []string, opts BuildOptions) (*BuildReport, error)
Config(ctx context.Context) (*config.Config, error)
- Diff(ctx context.Context, nameOrId string, options DiffOptions) (*DiffReport, error)
- Exists(ctx context.Context, nameOrId string) (*BoolReport, error)
- History(ctx context.Context, nameOrId string, opts ImageHistoryOptions) (*ImageHistoryReport, error)
+ Diff(ctx context.Context, nameOrID string, options DiffOptions) (*DiffReport, error)
+ Exists(ctx context.Context, nameOrID string) (*BoolReport, error)
+ History(ctx context.Context, nameOrID string, opts ImageHistoryOptions) (*ImageHistoryReport, error)
Import(ctx context.Context, opts ImageImportOptions) (*ImageImportReport, error)
Inspect(ctx context.Context, namesOrIDs []string, opts InspectOptions) ([]*ImageInspectReport, error)
List(ctx context.Context, opts ImageListOptions) ([]*ImageSummary, error)
@@ -20,14 +20,14 @@ type ImageEngine interface {
Pull(ctx context.Context, rawImage string, opts ImagePullOptions) (*ImagePullReport, error)
Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error
Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error)
- Save(ctx context.Context, nameOrId string, tags []string, options ImageSaveOptions) error
+ Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error
Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error)
SetTrust(ctx context.Context, args []string, options SetTrustOptions) error
ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error)
Shutdown(ctx context.Context)
- Tag(ctx context.Context, nameOrId string, tags []string, options ImageTagOptions) error
- Tree(ctx context.Context, nameOrId string, options ImageTreeOptions) (*ImageTreeReport, error)
- Untag(ctx context.Context, nameOrId string, tags []string, options ImageUntagOptions) error
+ Tag(ctx context.Context, nameOrID string, tags []string, options ImageTagOptions) error
+ Tree(ctx context.Context, nameOrID string, options ImageTreeOptions) (*ImageTreeReport, error)
+ Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error
ManifestCreate(ctx context.Context, names, images []string, opts ManifestCreateOptions) (string, error)
ManifestInspect(ctx context.Context, name string) ([]byte, error)
ManifestAdd(ctx context.Context, opts ManifestAddOptions) (string, error)
diff --git a/pkg/domain/entities/filters.go b/pkg/domain/entities/filters.go
index c7e227244..2ddbffbcd 100644
--- a/pkg/domain/entities/filters.go
+++ b/pkg/domain/entities/filters.go
@@ -20,14 +20,14 @@ type Names interface {
Names() []string
}
-// IdOrName interface allows filters to access ID() or Name() of object
-type IdOrNamed interface {
+// IDOrNamed interface allows filters to access ID() or Name() of object
+type IDOrNamed interface {
Identifier
Named
}
-// IdOrName interface allows filters to access ID() or Names() of object
-type IdOrNames interface {
+// IDOrNames interface allows filters to access ID() or Names() of object
+type IDOrNames interface {
Identifier
Names
}
@@ -42,11 +42,11 @@ func CompileImageFilters(filters url.Values) ImageFilter {
for name, targets := range filters {
switch name {
case "id":
- fns = append(fns, FilterIdFn(targets))
+ fns = append(fns, FilterIDFn(targets))
case "name":
fns = append(fns, FilterNamesFn(targets))
case "idOrName":
- fns = append(fns, FilterIdOrNameFn(targets))
+ fns = append(fns, FilterIDOrNameFn(targets))
}
}
@@ -66,11 +66,11 @@ func CompileContainerFilters(filters url.Values) ContainerFilter {
for name, targets := range filters {
switch name {
case "id":
- fns = append(fns, FilterIdFn(targets))
+ fns = append(fns, FilterIDFn(targets))
case "name":
fns = append(fns, FilterNameFn(targets))
case "idOrName":
- fns = append(fns, FilterIdOrNameFn(targets))
+ fns = append(fns, FilterIDOrNameFn(targets))
}
}
@@ -89,7 +89,7 @@ func CompileVolumeFilters(filters url.Values) VolumeFilter {
for name, targets := range filters {
if name == "id" {
- fns = append(fns, FilterIdFn(targets))
+ fns = append(fns, FilterIDFn(targets))
}
}
@@ -103,7 +103,7 @@ func CompileVolumeFilters(filters url.Values) VolumeFilter {
}
}
-func FilterIdFn(id []string) func(Identifier) bool {
+func FilterIDFn(id []string) func(Identifier) bool {
return func(obj Identifier) bool {
for _, v := range id {
if strings.Contains(obj.Id(), v) {
@@ -138,8 +138,8 @@ func FilterNamesFn(name []string) func(Names) bool {
}
}
-func FilterIdOrNameFn(id []string) func(IdOrNamed) bool {
- return func(obj IdOrNamed) bool {
+func FilterIDOrNameFn(id []string) func(IDOrNamed) bool {
+ return func(obj IDOrNamed) bool {
for _, v := range id {
if strings.Contains(obj.Id(), v) || strings.Contains(obj.Name(), v) {
return true
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 19a2c87f5..81f52fef5 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -45,13 +45,13 @@ type Image struct {
HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"`
}
-func (i *Image) Id() string {
+func (i *Image) Id() string { //nolint
return i.ID
}
type ImageSummary struct {
ID string `json:"Id"`
- ParentId string `json:",omitempty"`
+ ParentId string `json:",omitempty"` // nolint
RepoTags []string `json:",omitempty"`
Created time.Time `json:",omitempty"`
Size int64 `json:",omitempty"`
@@ -70,7 +70,7 @@ type ImageSummary struct {
History []string `json:",omitempty"`
}
-func (i *ImageSummary) Id() string {
+func (i *ImageSummary) Id() string { //nolint
return i.ID
}
@@ -266,7 +266,7 @@ type ImageImportOptions struct {
}
type ImageImportReport struct {
- Id string
+ Id string //nolint
}
type ImageSaveOptions struct {
@@ -299,7 +299,7 @@ type ShowTrustReport struct {
Raw []byte
SystemRegistriesDirPath string
JSONOutput []byte
- Policies []*trust.TrustPolicy
+ Policies []*trust.Policy
}
// SetTrustOptions describes the CLI options for setting trust
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 4f485cbee..0823bc64e 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -26,12 +26,18 @@ type PlayKubeOptions struct {
SeccompProfileRoot string
}
-// PlayKubeReport contains the results of running play kube.
-type PlayKubeReport struct {
- // Pod - the ID of the created pod.
- Pod string
+// PlayKubePod represents a single pod and associated containers created by play kube
+type PlayKubePod struct {
+ // ID - ID of the pod created as a result of play kube.
+ ID string
// Containers - the IDs of the containers running in the created pod.
Containers []string
// Logs - non-fatal errors and log messages while processing.
Logs []string
}
+
+// PlayKubeReport contains the results of running play kube.
+type PlayKubeReport struct {
+ // Pods - pods created by play kube.
+ Pods []PlayKubePod
+}
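With the report now carrying one PlayKubePod per pod, a consumer iterates the Pods slice instead of reading a single pod ID. A brief sketch of printing such a report; the report here is hand-built for illustration, normally it comes back from the play kube call:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/pkg/domain/entities"
    )

    func printPlayKubeReport(report *entities.PlayKubeReport) {
        for _, pod := range report.Pods {
            fmt.Println("Pod:", pod.ID)
            for _, ctr := range pod.Containers {
                fmt.Println("  Container:", ctr)
            }
            for _, line := range pod.Logs {
                fmt.Println("  log:", line)
            }
        }
    }

    func main() {
        // Hand-built report standing in for the result of play kube.
        report := &entities.PlayKubeReport{
            Pods: []entities.PlayKubePod{
                {ID: "a1b2c3", Containers: []string{"d4e5f6"}, Logs: []string{"pulled image"}},
            },
        }
        printPlayKubeReport(report)
    }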
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index 37acba6e6..fc76ddd41 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -17,15 +17,15 @@ type PodKillOptions struct {
type PodKillReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type ListPodsReport struct {
Cgroup string
Containers []*ListPodContainer
Created time.Time
- Id string
- InfraId string
+ Id string //nolint
+ InfraId string //nolint
Name string
Namespace string
Status string
@@ -33,7 +33,7 @@ type ListPodsReport struct {
}
type ListPodContainer struct {
- Id string
+ Id string //nolint
Names string
Status string
}
@@ -45,7 +45,7 @@ type PodPauseOptions struct {
type PodPauseReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type PodunpauseOptions struct {
@@ -55,7 +55,7 @@ type PodunpauseOptions struct {
type PodUnpauseReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type PodStopOptions struct {
@@ -67,7 +67,7 @@ type PodStopOptions struct {
type PodStopReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type PodRestartOptions struct {
@@ -77,7 +77,7 @@ type PodRestartOptions struct {
type PodRestartReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type PodStartOptions struct {
@@ -87,7 +87,7 @@ type PodStartOptions struct {
type PodStartReport struct {
Errs []error
- Id string
+ Id string //nolint
}
type PodRmOptions struct {
@@ -99,23 +99,24 @@ type PodRmOptions struct {
type PodRmReport struct {
Err error
- Id string
+ Id string //nolint
}
type PodCreateOptions struct {
- CGroupParent string
- Hostname string
- Infra bool
- InfraImage string
- InfraCommand string
- Labels map[string]string
- Name string
- Net *NetOptions
- Share []string
+ CGroupParent string
+ Hostname string
+ Infra bool
+ InfraImage string
+ InfraCommand string
+ InfraConmonPidFile string
+ Labels map[string]string
+ Name string
+ Net *NetOptions
+ Share []string
}
type PodCreateReport struct {
- Id string
+ Id string //nolint
}
func (p PodCreateOptions) ToPodSpecGen(s *specgen.PodSpecGenerator) {
@@ -127,6 +128,9 @@ func (p PodCreateOptions) ToPodSpecGen(s *specgen.PodSpecGenerator) {
if len(p.InfraCommand) > 0 {
s.InfraCommand = strings.Split(p.InfraCommand, " ")
}
+ if len(p.InfraConmonPidFile) > 0 {
+ s.InfraConmonPidFile = p.InfraConmonPidFile
+ }
s.InfraImage = p.InfraImage
s.SharedNamespaces = p.Share
@@ -155,7 +159,7 @@ type PodPruneOptions struct {
type PodPruneReport struct {
Err error
- Id string
+ Id string //nolint
}
type PodTopOptions struct {
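ToPodSpecGen copies the CLI-facing pod options into the pod spec generator, and the new InfraConmonPidFile field is carried over only when it is set. A sketch of building the options and applying them to a generator; the pod name and pidfile path are illustrative, and starting from a zero-value PodSpecGenerator is an assumption made purely for the example:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/pkg/domain/entities"
        "github.com/containers/libpod/pkg/specgen"
    )

    func main() {
        opts := entities.PodCreateOptions{
            Name:               "webpod",
            Infra:              true,
            InfraConmonPidFile: "/run/webpod-infra-conmon.pid",
            Share:              []string{"net", "ipc"},
        }

        // Copy the CLI options onto the spec generator, as pod create does.
        spec := &specgen.PodSpecGenerator{}
        opts.ToPodSpecGen(spec)

        fmt.Println("infra conmon pidfile:", spec.InfraConmonPidFile)
        fmt.Println("shared namespaces:   ", spec.SharedNamespaces)
    }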
diff --git a/pkg/domain/entities/set.go b/pkg/domain/entities/set.go
index c8d6cb1a9..1d31d82f9 100644
--- a/pkg/domain/entities/set.go
+++ b/pkg/domain/entities/set.go
@@ -4,12 +4,12 @@ import (
"strings"
)
-type stringSet struct {
+type StringSet struct {
m map[string]struct{}
}
-func NewStringSet(elem ...string) *stringSet {
- s := &stringSet{}
+func NewStringSet(elem ...string) *StringSet {
+ s := &StringSet{}
s.m = make(map[string]struct{}, len(elem))
for _, e := range elem {
s.Add(e)
@@ -17,20 +17,20 @@ func NewStringSet(elem ...string) *stringSet {
return s
}
-func (s *stringSet) Add(elem string) {
+func (s *StringSet) Add(elem string) {
s.m[elem] = struct{}{}
}
-func (s *stringSet) Remove(elem string) {
+func (s *StringSet) Remove(elem string) {
delete(s.m, elem)
}
-func (s *stringSet) Contains(elem string) bool {
+func (s *StringSet) Contains(elem string) bool {
_, ok := s.m[elem]
return ok
}
-func (s *stringSet) Elements() []string {
+func (s *StringSet) Elements() []string {
keys := make([]string, len(s.m))
i := 0
for k := range s.m {
@@ -40,6 +40,6 @@ func (s *stringSet) Elements() []string {
return keys
}
-func (s *stringSet) String() string {
+func (s *StringSet) String() string {
return strings.Join(s.Elements(), ", ")
}
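Exporting the set type makes it usable outside the entities package; its surface is the usual small-set API shown above. A quick usage sketch with illustrative element values:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/pkg/domain/entities"
    )

    func main() {
        supported := entities.NewStringSet("json", "table")
        supported.Add("yaml")
        supported.Remove("table")

        fmt.Println(supported.Contains("yaml"))  // true
        fmt.Println(supported.Contains("table")) // false
        fmt.Println(supported.String())          // comma-separated elements, unordered
    }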
diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go
index 21ab025de..622f74838 100644
--- a/pkg/domain/entities/types.go
+++ b/pkg/domain/entities/types.go
@@ -11,7 +11,7 @@ import (
)
type Container struct {
- IdOrNamed
+ IDOrNamed
}
type Volume struct {
@@ -19,7 +19,7 @@ type Volume struct {
}
type Report struct {
- Id []string
+ Id []string //nolint
Err map[string]error
}
diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go
index 23c066083..7cf7d82a2 100644
--- a/pkg/domain/entities/volumes.go
+++ b/pkg/domain/entities/volumes.go
@@ -16,9 +16,9 @@ type VolumeCreateOptions struct {
Options map[string]string `schema:"opts"`
}
-type IdOrNameResponse struct {
+type IDOrNameResponse struct {
// The Id or Name of an object
- IdOrName string
+ IDOrName string
}
type VolumeConfigResponse struct {
@@ -63,7 +63,7 @@ type VolumeRmOptions struct {
type VolumeRmReport struct {
Err error
- Id string
+ Id string //nolint
}
type VolumeInspectOptions struct {
@@ -80,7 +80,7 @@ type VolumePruneOptions struct {
type VolumePruneReport struct {
Err error
- Id string
+ Id string //nolint
}
type VolumeListOptions struct {
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index eb45d4630..4d6d0d59a 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -75,8 +75,8 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
}
// TODO: Should return *entities.ContainerExistsReport, error
-func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
- _, err := ic.Libpod.LookupContainer(nameOrId)
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
return nil, err
}
@@ -162,32 +162,33 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
return nil, err
}
- for _, con := range ctrs {
- report := entities.StopReport{Id: con.ID()}
- err = con.StopWithTimeout(options.Timeout)
+ errMap, err := parallel.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
+ var err error
+ if options.Timeout != nil {
+ err = c.StopWithTimeout(*options.Timeout)
+ } else {
+ err = c.Stop()
+ }
if err != nil {
- // These first two are considered non-fatal under the right conditions
- if errors.Cause(err) == define.ErrCtrStopped {
- logrus.Debugf("Container %s is already stopped", con.ID())
- reports = append(reports, &report)
- continue
-
- } else if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
- logrus.Debugf("Container %s is not running, could not stop", con.ID())
- reports = append(reports, &report)
- continue
+ switch {
+ case errors.Cause(err) == define.ErrCtrStopped:
+ logrus.Debugf("Container %s is already stopped", c.ID())
+ case options.All && errors.Cause(err) == define.ErrCtrStateInvalid:
+ logrus.Debugf("Container %s is not running, could not stop", c.ID())
+ default:
+ return err
}
- report.Err = err
- reports = append(reports, &report)
- continue
- } else if err := con.Cleanup(ctx); err != nil {
- // Only if no error, proceed to cleanup to ensure all
- // mounts are removed before we exit.
- report.Err = err
- reports = append(reports, &report)
- continue
}
- reports = append(reports, &report)
+ return c.Cleanup(ctx)
+ })
+ if err != nil {
+ return nil, err
+ }
+ for ctr, err := range errMap {
+ report := new(entities.StopReport)
+ report.Id = ctr.ID()
+ report.Err = err
+ reports = append(reports, report)
}
return reports, nil
}
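ContainerStop now fans the per-container work out through parallel.ContainerOp and then folds the returned container-to-error map into StopReports. A reduced sketch of that fan-out/collect shape with plain goroutines, standing in for what the parallel package presumably does; this is an assumption about its internals, not a copy of it:

    package main

    import (
        "fmt"
        "sync"
    )

    // containerOp runs op once per name concurrently and records each error,
    // mirroring the shape of the parallel.ContainerOp call used above.
    func containerOp(names []string, op func(string) error) map[string]error {
        var (
            mu     sync.Mutex
            wg     sync.WaitGroup
            errMap = make(map[string]error, len(names))
        )
        for _, name := range names {
            wg.Add(1)
            go func(name string) {
                defer wg.Done()
                err := op(name)
                mu.Lock()
                errMap[name] = err
                mu.Unlock()
            }(name)
        }
        wg.Wait()
        return errMap
    }

    func main() {
        results := containerOp([]string{"web", "db"}, func(name string) error {
            if name == "db" {
                return fmt.Errorf("container %s is not running", name)
            }
            return nil // stopped and cleaned up
        })
        for name, err := range results {
            fmt.Printf("%s: err=%v\n", name, err)
        }
    }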
@@ -322,7 +323,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return reports, nil
}
- errMap, err := parallel.ParallelContainerOp(ctx, ctrs, func(c *libpod.Container) error {
+ errMap, err := parallel.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
err := ic.Libpod.RemoveContainer(ctx, c, options.Force, options.Volumes)
if err != nil {
if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
@@ -383,11 +384,11 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
return report, err
}
-func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string, options entities.CommitOptions) (*entities.CommitReport, error) {
+func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string, options entities.CommitOptions) (*entities.CommitReport, error) {
var (
mimeType string
)
- ctr, err := ic.Libpod.LookupContainer(nameOrId)
+ ctr, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
return nil, err
}
@@ -428,8 +429,8 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string,
return &entities.CommitReport{Id: newImage.ID()}, nil
}
-func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrId string, options entities.ContainerExportOptions) error {
- ctr, err := ic.Libpod.LookupContainer(nameOrId)
+func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string, options entities.ContainerExportOptions) error {
+ ctr, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
return err
}
@@ -527,8 +528,8 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG
return &entities.ContainerCreateReport{Id: ctr.ID()}, nil
}
-func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error {
- ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string, options entities.AttachOptions) error {
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrID}, ic.Libpod)
if err != nil {
return err
}
@@ -590,12 +591,12 @@ func checkExecPreserveFDs(options entities.ExecOptions) (int, error) {
return ec, nil
}
-func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrID string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
ec, err := checkExecPreserveFDs(options)
if err != nil {
return ec, err
}
- ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrID}, ic.Libpod)
if err != nil {
return ec, err
}
@@ -607,12 +608,12 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, o
return define.TranslateExecErrorToExitCode(ec, err), err
}
-func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId string, options entities.ExecOptions) (string, error) {
+func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) {
_, err := checkExecPreserveFDs(options)
if err != nil {
return "", err
}
- ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrID}, ic.Libpod)
if err != nil {
return "", err
}
@@ -766,15 +767,15 @@ func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.C
}
// ContainerDiff provides changes to given container
-func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrId string, opts entities.DiffOptions) (*entities.DiffReport, error) {
+func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrID string, opts entities.DiffOptions) (*entities.DiffReport, error) {
if opts.Latest {
ctnr, err := ic.Libpod.GetLatestContainer()
if err != nil {
return nil, errors.Wrap(err, "unable to get latest container")
}
- nameOrId = ctnr.ID()
+ nameOrID = ctnr.ID()
}
- changes, err := ic.Libpod.GetDiff("", nameOrId)
+ changes, err := ic.Libpod.GetDiff("", nameOrID)
return &entities.DiffReport{Changes: changes}, err
}
@@ -976,7 +977,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
return reports, nil
}
-func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIds []string, options entities.ContainerMountOptions) ([]*entities.ContainerMountReport, error) {
+func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []string, options entities.ContainerMountOptions) ([]*entities.ContainerMountReport, error) {
if os.Geteuid() != 0 {
if driver := ic.Libpod.StorageConfig().GraphDriverName; driver != "vfs" {
// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
@@ -993,7 +994,7 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIds []strin
}
}
var reports []*entities.ContainerMountReport
- ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIDs, ic.Libpod)
if err != nil {
return nil, err
}
@@ -1028,9 +1029,9 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIds []strin
return reports, nil
}
-func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIds []string, options entities.ContainerUnmountOptions) ([]*entities.ContainerUnmountReport, error) {
+func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []string, options entities.ContainerUnmountOptions) ([]*entities.ContainerUnmountReport, error) {
var reports []*entities.ContainerUnmountReport
- ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIDs, ic.Libpod)
if err != nil {
return nil, err
}
@@ -1063,9 +1064,9 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) {
return ic.Libpod.GetConfig()
}
-func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrId string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
+func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
var reports []*entities.ContainerPortReport
- ctrs, err := getContainersByContext(options.All, options.Latest, []string{nameOrId}, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, []string{nameOrID}, ic.Libpod)
if err != nil {
return nil, err
}
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index abb5e2911..8853303d5 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
- "strings"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
@@ -16,165 +15,29 @@ import (
)
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
- opts := generate.Options{
- Files: options.Files,
- New: options.New,
- }
-
// First assume it's a container.
- if info, found, err := ic.generateSystemdgenContainerInfo(nameOrID, nil, options); found && err != nil {
- return nil, err
- } else if found && err == nil {
- output, err := generate.CreateContainerSystemdUnit(info, opts)
- if err != nil {
- return nil, err
+ ctr, ctrErr := ic.Libpod.LookupContainer(nameOrID)
+ if ctrErr == nil {
+ // Generate the unit for the container.
+ s, err := generate.ContainerUnit(ctr, options)
+ if err == nil {
+ return &entities.GenerateSystemdReport{Output: s}, nil
}
- return &entities.GenerateSystemdReport{Output: output}, nil
- }
-
- // --new does not support pods.
- if options.New {
- return nil, errors.Errorf("error generating systemd unit files: cannot generate generic files for a pod")
}
- // We're either having a pod or garbage.
+ // If it's not a container, we either have a pod or garbage.
pod, err := ic.Libpod.LookupPod(nameOrID)
if err != nil {
- return nil, err
- }
-
- // Error out if the pod has no infra container, which we require to be the
- // main service.
- if !pod.HasInfraContainer() {
- return nil, fmt.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
+ err = errors.Wrap(ctrErr, err.Error())
+ return nil, errors.Wrapf(err, "%s does not refer to a container or pod", nameOrID)
}
- // Generate a systemdgen.ContainerInfo for the infra container. This
- // ContainerInfo acts as the main service of the pod.
- infraID, err := pod.InfraContainerID()
- if err != nil {
- return nil, nil
- }
- podInfo, _, err := ic.generateSystemdgenContainerInfo(infraID, pod, options)
+ // Generate the units for the pod and all its containers.
+ s, err := generate.PodUnits(pod, options)
if err != nil {
return nil, err
}
-
- // Compute the container-dependency graph for the Pod.
- containers, err := pod.AllContainers()
- if err != nil {
- return nil, err
- }
- if len(containers) == 0 {
- return nil, fmt.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
- }
- graph, err := libpod.BuildContainerGraph(containers)
- if err != nil {
- return nil, err
- }
-
- // Traverse the dependency graph and create systemdgen.ContainerInfo's for
- // each container.
- containerInfos := []*generate.ContainerInfo{podInfo}
- for ctr, dependencies := range graph.DependencyMap() {
- // Skip the infra container as we already generated it.
- if ctr.ID() == infraID {
- continue
- }
- ctrInfo, _, err := ic.generateSystemdgenContainerInfo(ctr.ID(), nil, options)
- if err != nil {
- return nil, err
- }
- // Now add the container's dependencies and at the container as a
- // required service of the infra container.
- for _, dep := range dependencies {
- if dep.ID() == infraID {
- ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName)
- } else {
- _, serviceName := generateServiceName(dep, nil, options)
- ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName)
- }
- }
- podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName)
- containerInfos = append(containerInfos, ctrInfo)
- }
-
- // Now generate the systemd service for all containers.
- builder := strings.Builder{}
- for i, info := range containerInfos {
- if i > 0 {
- builder.WriteByte('\n')
- }
- out, err := generate.CreateContainerSystemdUnit(info, opts)
- if err != nil {
- return nil, err
- }
- builder.WriteString(out)
- }
-
- return &entities.GenerateSystemdReport{Output: builder.String()}, nil
-}
-
-// generateSystemdgenContainerInfo is a helper to generate a
-// systemdgen.ContainerInfo for `GenerateSystemd`.
-func (ic *ContainerEngine) generateSystemdgenContainerInfo(nameOrID string, pod *libpod.Pod, options entities.GenerateSystemdOptions) (*generate.ContainerInfo, bool, error) {
- ctr, err := ic.Libpod.LookupContainer(nameOrID)
- if err != nil {
- return nil, false, err
- }
-
- timeout := ctr.StopTimeout()
- if options.StopTimeout != nil {
- timeout = *options.StopTimeout
- }
-
- config := ctr.Config()
- conmonPidFile := config.ConmonPidFile
- if conmonPidFile == "" {
- return nil, true, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
- }
-
- createCommand := []string{}
- if config.CreateCommand != nil {
- createCommand = config.CreateCommand
- } else if options.New {
- return nil, true, errors.Errorf("cannot use --new on container %q: no create command found", nameOrID)
- }
-
- name, serviceName := generateServiceName(ctr, pod, options)
- info := &generate.ContainerInfo{
- ServiceName: serviceName,
- ContainerName: name,
- RestartPolicy: options.RestartPolicy,
- PIDFile: conmonPidFile,
- StopTimeout: timeout,
- GenerateTimestamp: true,
- CreateCommand: createCommand,
- }
-
- return info, true, nil
-}
-
-// generateServiceName generates the container name and the service name for systemd service.
-func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, string) {
- var kind, name, ctrName string
- if pod == nil {
- kind = options.ContainerPrefix //defaults to container
- name = ctr.ID()
- if options.Name {
- name = ctr.Name()
- }
- ctrName = name
- } else {
- kind = options.PodPrefix //defaults to pod
- name = pod.ID()
- ctrName = ctr.ID()
- if options.Name {
- name = pod.Name()
- ctrName = ctr.Name()
- }
- }
- return ctrName, fmt.Sprintf("%s%s%s", kind, options.Separator, name)
+ return &entities.GenerateSystemdReport{Output: s}, nil
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
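
The rewritten GenerateSystemd above tries the name as a container first and only then as a pod, wrapping both lookup errors when neither matches. A small sketch of that flow, with lookupCtr and lookupPod as hypothetical stand-ins for ic.Libpod.LookupContainer and ic.Libpod.LookupPod:

package sketch

import "github.com/pkg/errors"

// resolveTarget shows the container-then-pod lookup order: the container
// error is kept and wrapped into the pod error so the caller sees why the
// name matched neither kind of object.
func resolveTarget(nameOrID string, lookupCtr, lookupPod func(string) error) error {
    ctrErr := lookupCtr(nameOrID)
    if ctrErr == nil {
        return nil // it is a container
    }
    if podErr := lookupPod(nameOrID); podErr != nil {
        err := errors.Wrap(ctrErr, podErr.Error())
        return errors.Wrapf(err, "%s does not refer to a container or pod", nameOrID)
    }
    return nil // it is a pod
}
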
diff --git a/pkg/domain/infra/abi/healthcheck.go b/pkg/domain/infra/abi/healthcheck.go
index 4e925ef56..dfa9a6fa5 100644
--- a/pkg/domain/infra/abi/healthcheck.go
+++ b/pkg/domain/infra/abi/healthcheck.go
@@ -7,8 +7,8 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrId string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) {
- status, err := ic.Libpod.HealthCheck(nameOrId)
+func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrID string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) {
+ status, err := ic.Libpod.HealthCheck(nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index d8af4d339..67f331aac 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -38,8 +38,8 @@ import (
// SignatureStoreDir defines default directory to store signatures
const SignatureStoreDir = "/var/lib/containers/sigstore"
-func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
- _, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
+ _, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil && errors.Cause(err) != define.ErrNoSuchImage {
return nil, err
}
@@ -65,8 +65,8 @@ func (ir *ImageEngine) pruneImagesHelper(ctx context.Context, all bool, filters
return &report, nil
}
-func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
- image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) History(ctx context.Context, nameOrID string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return nil, err
}
@@ -261,8 +261,8 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
nil)
}
-// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
-// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
+// func (r *imageRuntime) Delete(ctx context.Context, nameOrID string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrID)
// if err != nil {
// return nil, err
// }
@@ -292,8 +292,8 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
// return &report, nil
// }
-func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string, options entities.ImageTagOptions) error {
- newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string, options entities.ImageTagOptions) error {
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return err
}
@@ -305,8 +305,8 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string,
return nil
}
-func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error {
- newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string, options entities.ImageUntagOptions) error {
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return err
}
@@ -356,16 +356,16 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti
return &entities.ImageImportReport{Id: id}, nil
}
-func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string, options entities.ImageSaveOptions) error {
- newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string, options entities.ImageSaveOptions) error {
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return err
}
- return newImage.Save(ctx, nameOrId, options.Format, options.Output, tags, options.Quiet, options.Compress)
+ return newImage.Save(ctx, nameOrID, options.Format, options.Output, tags, options.Quiet, options.Compress)
}
-func (ir *ImageEngine) Diff(_ context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) {
- changes, err := ir.Libpod.GetDiff("", nameOrId)
+func (ir *ImageEngine) Diff(_ context.Context, nameOrID string, _ entities.DiffOptions) (*entities.DiffReport, error) {
+ changes, err := ir.Libpod.GetDiff("", nameOrID)
if err != nil {
return nil, err
}
@@ -420,8 +420,8 @@ func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts
return &entities.BuildReport{ID: id}, nil
}
-func (ir *ImageEngine) Tree(ctx context.Context, nameOrId string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
- img, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
+ img, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return nil, err
}
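
The Exists handlers above treat a "no such image" error as a plain false result rather than a failure. A self-contained sketch of that pattern, using a local sentinel error in place of define.ErrNoSuchImage and a hypothetical lookup function in place of NewFromLocal:

package sketch

import "github.com/pkg/errors"

// errNoSuchImage stands in for define.ErrNoSuchImage.
var errNoSuchImage = errors.New("no such image")

// imageExists returns false (not an error) when the lookup reports the image
// is missing, and propagates every other error unchanged.
func imageExists(nameOrID string, lookup func(string) error) (bool, error) {
    err := lookup(nameOrID)
    if err != nil && errors.Cause(err) != errNoSuchImage {
        return false, err
    }
    return err == nil, nil
}
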
diff --git a/pkg/domain/infra/abi/parse/parse.go b/pkg/domain/infra/abi/parse/parse.go
index 6c0e1ee55..2320c6a32 100644
--- a/pkg/domain/infra/abi/parse/parse.go
+++ b/pkg/domain/infra/abi/parse/parse.go
@@ -12,7 +12,7 @@ import (
// Handle volume options from CLI.
// Parse "o" option to find UID, GID.
-func ParseVolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error) {
+func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error) {
libpodOptions := []libpod.VolumeCreateOption{}
volumeOptions := make(map[string]string)
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 6d0919d2b..f5b93c51b 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -26,6 +26,7 @@ import (
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ v1apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
)
@@ -38,13 +39,7 @@ const (
func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
var (
- containers []*libpod.Container
- pod *libpod.Pod
- podOptions []libpod.PodCreateOption
- podYAML v1.Pod
- registryCreds *types.DockerAuthConfig
- writer io.Writer
- report entities.PlayKubeReport
+ kubeObject v1.ObjectReference
)
content, err := ioutil.ReadFile(path)
@@ -52,25 +47,84 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
return nil, err
}
- if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ if err := yaml.Unmarshal(content, &kubeObject); err != nil {
return nil, errors.Wrapf(err, "unable to read %q as YAML", path)
}
// NOTE: pkg/bindings/play is also parsing the file.
// A pkg/kube would be nice to refactor and abstract
// parts of the K8s-related code.
- if podYAML.Kind != "Pod" {
- return nil, errors.Errorf("invalid YAML kind: %q. Pod is the only supported Kubernetes YAML kind", podYAML.Kind)
+ switch kubeObject.Kind {
+ case "Pod":
+ var podYAML v1.Pod
+ var podTemplateSpec v1.PodTemplateSpec
+ if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read YAML %q as Kube Pod", path)
+ }
+ podTemplateSpec.ObjectMeta = podYAML.ObjectMeta
+ podTemplateSpec.Spec = podYAML.Spec
+ return ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options)
+ case "Deployment":
+ var deploymentYAML v1apps.Deployment
+ if err := yaml.Unmarshal(content, &deploymentYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read YAML %q as Kube Deployment", path)
+ }
+ return ic.playKubeDeployment(ctx, &deploymentYAML, options)
+ default:
+ return nil, errors.Errorf("invalid YAML kind: %q. [Pod|Deployment] are the only supported Kubernetes Kinds", kubeObject.Kind)
+ }
+
+}
+
+func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ var (
+ deploymentName string
+ podSpec v1.PodTemplateSpec
+ numReplicas int32
+ i int32
+ report entities.PlayKubeReport
+ )
+
+ deploymentName = deploymentYAML.ObjectMeta.Name
+ if deploymentName == "" {
+ return nil, errors.Errorf("Deployment does not have a name")
}
+ numReplicas = 1
+ if deploymentYAML.Spec.Replicas != nil {
+ numReplicas = *deploymentYAML.Spec.Replicas
+ }
+ podSpec = deploymentYAML.Spec.Template
+
+ // create "replicas" number of pods
+ for i = 0; i < numReplicas; i++ {
+ podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
+ podReport, err := ic.playKubePod(ctx, podName, &podSpec, options)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error encountered while bringing up pod %s", podName)
+ }
+ report.Pods = append(report.Pods, podReport.Pods...)
+ }
+ return &report, nil
+}
+
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ playKubePod entities.PlayKubePod
+ report entities.PlayKubeReport
+ )
// check for name collision between pod and container
- podName := podYAML.ObjectMeta.Name
if podName == "" {
return nil, errors.Errorf("pod does not have a name")
}
for _, n := range podYAML.Spec.Containers {
if n.Name == podName {
- report.Logs = append(report.Logs,
+ playKubePod.Logs = append(playKubePod.Logs,
fmt.Sprintf("a container exists with the same name (%q) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName))
podName = fmt.Sprintf("%s_pod", podName)
}
@@ -239,11 +293,11 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
if err != nil {
return nil, err
}
- ctr, err := createconfig.CreateContainerFromCreateConfig(ic.Libpod, conf, ctx, pod)
+ ctr, err := createconfig.CreateContainerFromCreateConfig(ctx, ic.Libpod, conf, pod)
if err != nil {
return nil, err
}
@@ -259,11 +313,13 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
}
}
- report.Pod = pod.ID()
+ playKubePod.ID = pod.ID()
for _, ctr := range containers {
- report.Containers = append(report.Containers, ctr.ID())
+ playKubePod.Containers = append(playKubePod.Containers, ctr.ID())
}
+ report.Pods = append(report.Pods, playKubePod)
+
return &report, nil
}
@@ -351,7 +407,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -368,7 +424,14 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
- containerConfig.Name = containerYAML.Name
+
+ // podName must be non-empty so that Deployment objects can create
+ // multiple pods whose containers have unique names
+ if podName == "" {
+ return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
+ }
+ containerConfig.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
+
containerConfig.Tty = containerYAML.TTY
containerConfig.Pod = podID
@@ -382,7 +445,10 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
setupSecurityContext(&securityConfig, &userConfig, containerYAML)
- securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name)
+ // Since we prefix the container name with the pod name to work around the uniqueness requirement,
+ // the seccomp profile should reference the actual container name from the YAML
+ // but apply to the containers with the prefixed name
+ securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerYAML.Name)
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
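
The Deployment support above expands replicas into individual pods named "<deployment>-pod-<n>" and prefixes every container with its pod name so container names stay unique across replicas. A small sketch of that naming scheme, based directly on the fmt.Sprintf calls in playKubeDeployment and kubeContainerToCreateConfig:

package sketch

import "fmt"

// deploymentPodNames maps each generated pod name to the container names it
// will hold. Example: deploymentPodNames("web", 2, []string{"nginx"}) yields
// {"web-pod-0": ["web-pod-0-nginx"], "web-pod-1": ["web-pod-1-nginx"]}.
func deploymentPodNames(deployment string, replicas int32, containers []string) map[string][]string {
    out := make(map[string][]string)
    for i := int32(0); i < replicas; i++ {
        podName := fmt.Sprintf("%s-pod-%d", deployment, i)
        for _, c := range containers {
            out[podName] = append(out[podName], fmt.Sprintf("%s-%s", podName, c))
        }
    }
    return out
}
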
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index 320880920..054b59b06 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -45,8 +45,8 @@ func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
return outpods, err
}
-func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
- _, err := ic.Libpod.LookupPod(nameOrId)
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupPod(nameOrID)
if err != nil && errors.Cause(err) != define.ErrNoSuchPod {
return nil, err
}
@@ -144,6 +144,7 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
var (
reports []*entities.PodStopReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
@@ -199,10 +200,12 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
var (
reports []*entities.PodStartReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
+
for _, p := range pods {
report := entities.PodStartReport{Id: p.ID()}
errs, err := p.Start(ctx)
@@ -227,6 +230,7 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, optio
var (
reports []*entities.PodRmReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
@@ -347,7 +351,7 @@ func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOpti
Status: state.String(),
})
}
- infraId, err := p.InfraContainerID()
+ infraID, err := p.InfraContainerID()
if err != nil {
return nil, err
}
@@ -356,7 +360,7 @@ func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOpti
Containers: lpcs,
Created: p.CreatedTime(),
Id: p.ID(),
- InfraId: infraId,
+ InfraId: infraID,
Name: p.Name(),
Namespace: p.Namespace(),
Status: status,
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 9b538b301..33ba58558 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -72,11 +72,9 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
return err
}
unitName := fmt.Sprintf("podman-%d.scope", os.Getpid())
- if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
- if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
+ if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
+ if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
logrus.Warnf("Failed to add podman to systemd sandbox cgroup: %v", err)
- } else {
- logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err)
}
}
}
@@ -338,7 +336,7 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
}
for _, viu := range inUse {
if util.StringInSlice(viu, runningContainers) {
- consInUse += 1
+ consInUse++
}
}
report := entities.SystemDfVolumeReport{
@@ -376,12 +374,12 @@ func (se *SystemEngine) Renumber(ctx context.Context, flags *pflag.FlagSet, conf
return nil
}
-func (s SystemEngine) Migrate(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig, options entities.SystemMigrateOptions) error {
+func (se SystemEngine) Migrate(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig, options entities.SystemMigrateOptions) error {
return nil
}
-func (s SystemEngine) Shutdown(ctx context.Context) {
- if err := s.Libpod.Shutdown(false); err != nil {
+func (se SystemEngine) Shutdown(ctx context.Context) {
+ if err := se.Libpod.Shutdown(false); err != nil {
logrus.Error(err)
}
}
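
The SetupRootless change above inverts the nesting: the process is only moved into a systemd user scope when the cgroup manager is systemd, instead of always attempting the move and downgrading the log level afterwards. A sketch of the new shape, with runUnderScope as a hypothetical stand-in for utils.RunUnderSystemdScope:

package sketch

import "log"

// moveToSystemdScope skips the scope entirely under cgroupfs and only warns
// when the move fails under the systemd cgroup manager.
func moveToSystemdScope(cgroupManager string, runUnderScope func() error) {
    if cgroupManager != "systemd" {
        return
    }
    if err := runUnderScope(); err != nil {
        log.Printf("Failed to add podman to systemd sandbox cgroup: %v", err)
    }
}
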
diff --git a/pkg/domain/infra/abi/trust.go b/pkg/domain/infra/abi/trust.go
index 5b89c91d9..03986ad0e 100644
--- a/pkg/domain/infra/abi/trust.go
+++ b/pkg/domain/infra/abi/trust.go
@@ -112,8 +112,8 @@ func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options enti
return ioutil.WriteFile(policyPath, data, 0644)
}
-func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]*trust.TrustPolicy, error) {
- var output []*trust.TrustPolicy
+func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]*trust.Policy, error) {
+ var output []*trust.Policy
registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
if err != nil {
@@ -121,7 +121,7 @@ func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistri
}
if len(policyContentStruct.Default) > 0 {
- defaultPolicyStruct := trust.TrustPolicy{
+ defaultPolicyStruct := trust.Policy{
Name: "* (default)",
RepoName: "default",
Type: trustTypeDescription(policyContentStruct.Default[0].Type),
@@ -130,7 +130,7 @@ func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistri
}
for _, transval := range policyContentStruct.Transports {
for repo, repoval := range transval {
- tempTrustShowOutput := trust.TrustPolicy{
+ tempTrustShowOutput := trust.Policy{
Name: repo,
RepoName: repo,
Type: repoval[0].Type,
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index 91b2440df..a311e0c4e 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -10,7 +10,7 @@ import (
"github.com/pkg/errors"
)
-func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) {
var (
volumeOptions []libpod.VolumeCreateOption
)
@@ -24,7 +24,7 @@ func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.Volum
volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(opts.Label))
}
if len(opts.Options) > 0 {
- parsedOptions, err := parse.ParseVolumeOptions(opts.Options)
+ parsedOptions, err := parse.VolumeOptions(opts.Options)
if err != nil {
return nil, err
}
@@ -34,7 +34,7 @@ func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.Volum
if err != nil {
return nil, err
}
- return &entities.IdOrNameResponse{IdOrName: vol.Name()}, nil
+ return &entities.IDOrNameResponse{IDOrName: vol.Name()}, nil
}
func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
index 0a82b9a6b..60d0c6e86 100644
--- a/pkg/domain/infra/runtime_abi.go
+++ b/pkg/domain/infra/runtime_abi.go
@@ -20,7 +20,7 @@ func NewContainerEngine(facts *entities.PodmanConfig) (entities.ContainerEngine,
r, err := NewLibpodRuntime(facts.FlagSet, facts)
return r, err
case entities.TunnelMode:
- ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.Uri, facts.PassPhrase, facts.Identities...)
+ ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.PassPhrase, facts.Identities...)
return &tunnel.ContainerEngine{ClientCxt: ctx}, err
}
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
@@ -33,7 +33,7 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
r, err := NewLibpodImageRuntime(facts.FlagSet, facts)
return r, err
case entities.TunnelMode:
- ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.Uri, facts.PassPhrase, facts.Identities...)
+ ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.PassPhrase, facts.Identities...)
return &tunnel.ImageEngine{ClientCxt: ctx}, err
}
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go
index bba7d2c0c..24a93b888 100644
--- a/pkg/domain/infra/runtime_tunnel.go
+++ b/pkg/domain/infra/runtime_tunnel.go
@@ -16,7 +16,7 @@ func NewContainerEngine(facts *entities.PodmanConfig) (entities.ContainerEngine,
case entities.ABIMode:
return nil, fmt.Errorf("direct runtime not supported")
case entities.TunnelMode:
- ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.Uri, facts.PassPhrase, facts.Identities...)
+ ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.PassPhrase, facts.Identities...)
return &tunnel.ContainerEngine{ClientCxt: ctx}, err
}
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
@@ -28,7 +28,7 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
case entities.ABIMode:
return nil, fmt.Errorf("direct image runtime not supported")
case entities.TunnelMode:
- ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.Uri, facts.PassPhrase, facts.Identities...)
+ ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.PassPhrase, facts.Identities...)
return &tunnel.ImageEngine{ClientCxt: ctx}, err
}
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 36b7bf535..68a8b0329 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -26,8 +26,8 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
return errors.New("not implemented")
}
-func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
- exists, err := containers.Exists(ic.ClientCxt, nameOrId)
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
+ exists, err := containers.Exists(ic.ClientCxt, nameOrID)
return &entities.BoolReport{Value: exists}, err
}
@@ -100,7 +100,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
for _, c := range ctrs {
report := entities.StopReport{Id: c.ID}
- if err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout); err != nil {
+ if err = containers.Stop(ic.ClientCxt, c.ID, options.Timeout); err != nil {
// These first two are considered non-fatal under the right conditions
if errors.Cause(err).Error() == define.ErrCtrStopped.Error() {
logrus.Debugf("Container %s is already stopped", c.ID)
@@ -230,10 +230,10 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
return &entities.StringSliceReport{Value: topOutput}, nil
}
-func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string, options entities.CommitOptions) (*entities.CommitReport, error) {
+func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string, options entities.CommitOptions) (*entities.CommitReport, error) {
var (
repo string
- tag string = "latest"
+ tag = "latest"
)
if len(options.ImageName) > 0 {
ref, err := reference.Parse(options.ImageName)
@@ -259,14 +259,14 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string,
Repo: &repo,
Tag: &tag,
}
- response, err := containers.Commit(ic.ClientCxt, nameOrId, commitOpts)
+ response, err := containers.Commit(ic.ClientCxt, nameOrID, commitOpts)
if err != nil {
return nil, err
}
return &entities.CommitReport{Id: response.ID}, nil
}
-func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrId string, options entities.ContainerExportOptions) error {
+func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string, options entities.ContainerExportOptions) error {
var (
err error
w io.Writer
@@ -277,7 +277,7 @@ func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrId string,
return err
}
}
- return containers.Export(ic.ClientCxt, nameOrId, w)
+ return containers.Export(ic.ClientCxt, nameOrID, w)
}
func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) {
@@ -357,7 +357,7 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG
return &entities.ContainerCreateReport{Id: response.ID}, nil
}
-func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIds []string, options entities.ContainerLogsOptions) error {
+func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIDs []string, options entities.ContainerLogsOptions) error {
since := options.Since.Format(time.RFC3339)
tail := strconv.FormatInt(options.Tail, 10)
stdout := options.Writer != nil
@@ -375,7 +375,7 @@ func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIds []string,
outCh := make(chan string)
ctx, cancel := context.WithCancel(context.Background())
go func() {
- err = containers.Logs(ic.ClientCxt, nameOrIds[0], opts, outCh, outCh)
+ err = containers.Logs(ic.ClientCxt, nameOrIDs[0], opts, outCh, outCh)
cancel()
}()
@@ -389,8 +389,8 @@ func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIds []string,
}
}
-func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error {
- return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil)
+func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string, options entities.AttachOptions) error {
+ return containers.Attach(ic.ClientCxt, nameOrID, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil)
}
func makeExecConfig(options entities.ExecOptions) *handlers.ExecCreateConfig {
@@ -415,10 +415,10 @@ func makeExecConfig(options entities.ExecOptions) *handlers.ExecCreateConfig {
return createConfig
}
-func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrID string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
createConfig := makeExecConfig(options)
- sessionID, err := containers.ExecCreate(ic.ClientCxt, nameOrId, createConfig)
+ sessionID, err := containers.ExecCreate(ic.ClientCxt, nameOrID, createConfig)
if err != nil {
return 125, err
}
@@ -435,10 +435,10 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, o
return inspectOut.ExitCode, nil
}
-func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId string, options entities.ExecOptions) (string, error) {
+func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) {
createConfig := makeExecConfig(options)
- sessionID, err := containers.ExecCreate(ic.ClientCxt, nameOrId, createConfig)
+ sessionID, err := containers.ExecCreate(ic.ClientCxt, nameOrID, createConfig)
if err != nil {
return "", err
}
@@ -525,8 +525,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
return &report, err
}
-func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) {
- changes, err := containers.Diff(ic.ClientCxt, nameOrId)
+func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrID string, _ entities.DiffOptions) (*entities.DiffReport, error) {
+ changes, err := containers.Diff(ic.ClientCxt, nameOrID)
return &entities.DiffReport{Changes: changes}, err
}
@@ -555,11 +555,11 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
return reports, nil
}
-func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIds []string, options entities.ContainerMountOptions) ([]*entities.ContainerMountReport, error) {
+func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []string, options entities.ContainerMountOptions) ([]*entities.ContainerMountReport, error) {
return nil, errors.New("mounting containers is not supported for remote clients")
}
-func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIds []string, options entities.ContainerUnmountOptions) ([]*entities.ContainerUnmountReport, error) {
+func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []string, options entities.ContainerUnmountOptions) ([]*entities.ContainerUnmountReport, error) {
return nil, errors.New("unmounting containers is not supported for remote clients")
}
@@ -567,13 +567,13 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) {
return config.Default()
}
-func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrId string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
+func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
var (
reports []*entities.ContainerPortReport
namesOrIds []string
)
- if len(nameOrId) > 0 {
- namesOrIds = append(namesOrIds, nameOrId)
+ if len(nameOrID) > 0 {
+ namesOrIds = append(namesOrIds, nameOrID)
}
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
diff --git a/pkg/domain/infra/tunnel/generate.go b/pkg/domain/infra/tunnel/generate.go
index eb5587f89..519dc5907 100644
--- a/pkg/domain/infra/tunnel/generate.go
+++ b/pkg/domain/infra/tunnel/generate.go
@@ -13,5 +13,5 @@ func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string,
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
- return generate.GenerateKube(ic.ClientCxt, nameOrID, options)
+ return generate.Kube(ic.ClientCxt, nameOrID, options)
}
diff --git a/pkg/domain/infra/tunnel/healthcheck.go b/pkg/domain/infra/tunnel/healthcheck.go
index e589489b3..56bdd6759 100644
--- a/pkg/domain/infra/tunnel/healthcheck.go
+++ b/pkg/domain/infra/tunnel/healthcheck.go
@@ -8,6 +8,6 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrId string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) {
- return containers.RunHealthCheck(ic.ClientCxt, nameOrId)
+func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrID string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) {
+ return containers.RunHealthCheck(ic.ClientCxt, nameOrID)
}
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
index 862c7a5d6..2bbc0e7a5 100644
--- a/pkg/domain/infra/tunnel/helpers.go
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -13,11 +13,11 @@ import (
"github.com/pkg/errors"
)
-func getContainersByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]entities.ListContainer, error) {
+func getContainersByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]entities.ListContainer, error) {
var (
cons []entities.ListContainer
)
- if all && len(namesOrIds) > 0 {
+ if all && len(namesOrIDs) > 0 {
return nil, errors.New("cannot lookup containers and all")
}
c, err := containers.List(contextWithConnection, nil, bindings.PTrue, nil, nil, nil, bindings.PTrue)
@@ -27,7 +27,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam
if all {
return c, err
}
- for _, id := range namesOrIds {
+ for _, id := range namesOrIDs {
var found bool
for _, con := range c {
if id == con.ID || strings.HasPrefix(con.ID, id) || util.StringInSlice(id, con.Names) {
@@ -43,11 +43,11 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam
return cons, nil
}
-func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]*entities.ListPodsReport, error) {
+func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]*entities.ListPodsReport, error) {
var (
sPods []*entities.ListPodsReport
)
- if all && len(namesOrIds) > 0 {
+ if all && len(namesOrIDs) > 0 {
return nil, errors.New("cannot lookup specific pods and all")
}
@@ -58,17 +58,17 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrId
if all {
return fPods, nil
}
- for _, nameOrId := range namesOrIds {
+ for _, nameOrID := range namesOrIDs {
var found bool
for _, f := range fPods {
- if f.Name == nameOrId || strings.HasPrefix(f.Id, nameOrId) {
+ if f.Name == nameOrID || strings.HasPrefix(f.Id, nameOrID) {
sPods = append(sPods, f)
found = true
break
}
}
if !found {
- return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrId)
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrID)
}
}
return sPods, nil
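
The remote getPodsByContext above matches a user-supplied argument against either the pod's name or a prefix of its full ID, which is what makes short IDs usable over the tunnel client. The matching predicate, extracted as a one-line sketch:

package sketch

import "strings"

// matchPod reports whether nameOrID identifies the pod: exact name match or
// ID prefix match, exactly as in the lookup loop above.
func matchPod(nameOrID, podName, podID string) bool {
    return podName == nameOrID || strings.HasPrefix(podID, nameOrID)
}
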
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index c300e74d0..fc7ac0aa8 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -18,8 +18,8 @@ import (
"github.com/pkg/errors"
)
-func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
- found, err := images.Exists(ir.ClientCxt, nameOrId)
+func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
+ found, err := images.Exists(ir.ClientCxt, nameOrID)
return &entities.BoolReport{Value: found}, err
}
@@ -50,8 +50,8 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
return is, nil
}
-func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
- results, err := images.History(ir.ClientCxt, nameOrId)
+func (ir *ImageEngine) History(ctx context.Context, nameOrID string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ results, err := images.History(ir.ClientCxt, nameOrID)
if err != nil {
return nil, err
}
@@ -98,7 +98,7 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
return &entities.ImagePullReport{Images: pulledImages}, nil
}
-func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string, options entities.ImageTagOptions) error {
+func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string, options entities.ImageTagOptions) error {
for _, newTag := range tags {
var (
tag, repo string
@@ -114,19 +114,19 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string,
repo = r.Name()
}
if len(repo) < 1 {
- return errors.Errorf("invalid image name %q", nameOrId)
+ return errors.Errorf("invalid image name %q", nameOrID)
}
- if err := images.Tag(ir.ClientCxt, nameOrId, tag, repo); err != nil {
+ if err := images.Tag(ir.ClientCxt, nameOrID, tag, repo); err != nil {
return err
}
}
return nil
}
-func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error {
+func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string, options entities.ImageUntagOptions) error {
// Remove all tags if none are provided
if len(tags) == 0 {
- newImage, err := images.GetImage(ir.ClientCxt, nameOrId, bindings.PFalse)
+ newImage, err := images.GetImage(ir.ClientCxt, nameOrID, bindings.PFalse)
if err != nil {
return err
}
@@ -148,9 +148,9 @@ func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string
repo = r.Name()
}
if len(repo) < 1 {
- return errors.Errorf("invalid image name %q", nameOrId)
+ return errors.Errorf("invalid image name %q", nameOrID)
}
- if err := images.Untag(ir.ClientCxt, nameOrId, tag, repo); err != nil {
+ if err := images.Untag(ir.ClientCxt, nameOrID, tag, repo); err != nil {
return err
}
}
@@ -199,7 +199,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
return images.Push(ir.ClientCxt, source, destination, options)
}
-func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string, options entities.ImageSaveOptions) error {
+func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string, options entities.ImageSaveOptions) error {
var (
f *os.File
err error
@@ -217,7 +217,7 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string,
return err
}
- exErr := images.Export(ir.ClientCxt, nameOrId, f, &options.Format, &options.Compress)
+ exErr := images.Export(ir.ClientCxt, nameOrID, f, &options.Format, &options.Compress)
if err := f.Close(); err != nil {
return err
}
@@ -250,8 +250,8 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string,
}
// Diff reports the changes to the given image
-func (ir *ImageEngine) Diff(ctx context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) {
- changes, err := images.Diff(ir.ClientCxt, nameOrId)
+func (ir *ImageEngine) Diff(ctx context.Context, nameOrID string, _ entities.DiffOptions) (*entities.DiffReport, error) {
+ changes, err := images.Diff(ir.ClientCxt, nameOrID)
if err != nil {
return nil, err
}
@@ -277,8 +277,8 @@ func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts
return images.Build(ir.ClientCxt, containerFiles, opts, tarfile)
}
-func (ir *ImageEngine) Tree(ctx context.Context, nameOrId string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
- return images.Tree(ir.ClientCxt, nameOrId, &opts.WhatRequires)
+func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
+ return images.Tree(ir.ClientCxt, nameOrID, &opts.WhatRequires)
}
// Shutdown Libpod engine
diff --git a/pkg/domain/infra/tunnel/play.go b/pkg/domain/infra/tunnel/play.go
index 15383a703..5f6bc4a2a 100644
--- a/pkg/domain/infra/tunnel/play.go
+++ b/pkg/domain/infra/tunnel/play.go
@@ -8,5 +8,5 @@ import (
)
func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
- return play.PlayKube(ic.ClientCxt, path, options)
+ return play.Kube(ic.ClientCxt, path, options)
}
diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go
index b93c48aab..5ca4a6a80 100644
--- a/pkg/domain/infra/tunnel/pods.go
+++ b/pkg/domain/infra/tunnel/pods.go
@@ -7,11 +7,12 @@ import (
"github.com/containers/libpod/pkg/bindings/pods"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/specgen"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
)
-func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
- exists, err := pods.Exists(ic.ClientCxt, nameOrId)
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
+ exists, err := pods.Exists(ic.ClientCxt, nameOrID)
return &entities.BoolReport{Value: exists}, err
}
@@ -19,6 +20,12 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
var (
reports []*entities.PodKillReport
)
+
+ _, err := util.ParseSignal(options.Signal)
+ if err != nil {
+ return nil, err
+ }
+
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
@@ -87,7 +94,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
var (
reports []*entities.PodStopReport
- timeout int = -1
+ timeout = -1
)
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
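
PodKill above now parses the signal on the client before any pods are resolved, so an invalid --signal fails fast instead of producing one server-side error per pod. A purely illustrative sketch of that idea; the tiny table below is an assumption for demonstration, the real code delegates to util.ParseSignal, which understands the full set of names and numbers:

package sketch

import "fmt"

// validSignals is a deliberately small illustrative table.
var validSignals = map[string]int{"SIGHUP": 1, "SIGKILL": 9, "SIGTERM": 15}

// checkSignal validates the signal name up front and returns its number.
func checkSignal(sig string) (int, error) {
    num, ok := validSignals[sig]
    if !ok {
        return 0, fmt.Errorf("invalid signal %q", sig)
    }
    return num, nil
}
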
diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go
index e48a7fa7c..5b65c66ea 100644
--- a/pkg/domain/infra/tunnel/volumes.go
+++ b/pkg/domain/infra/tunnel/volumes.go
@@ -7,12 +7,12 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
)
-func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) {
response, err := volumes.Create(ic.ClientCxt, opts)
if err != nil {
return nil, err
}
- return &entities.IdOrNameResponse{IdOrName: response.Name}, nil
+ return &entities.IDOrNameResponse{IDOrName: response.Name}, nil
}
func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
diff --git a/pkg/domain/utils/utils.go b/pkg/domain/utils/utils.go
index c17769f62..ee213e1b6 100644
--- a/pkg/domain/utils/utils.go
+++ b/pkg/domain/utils/utils.go
@@ -31,7 +31,7 @@ func ToLibpodFilters(f url.Values) (filters []string) {
return
}
-func ToUrlValues(f []string) (filters url.Values) {
+func ToURLValues(f []string) (filters url.Values) {
filters = make(url.Values)
for _, v := range f {
t := strings.SplitN(v, "=", 2)
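
The renamed ToURLValues helper turns "key=value" filter strings into a url.Values map for API requests. A self-contained sketch; the loop body past SplitN is an assumption based on how the result is used, not a copy of the original:

package sketch

import (
    "net/url"
    "strings"
)

// toURLValues converts CLI-style filters into query values.
// Example: toURLValues([]string{"label=app=web", "dangling=true"}) yields
// url.Values{"label": {"app=web"}, "dangling": {"true"}}.
func toURLValues(f []string) url.Values {
    filters := make(url.Values)
    for _, v := range f {
        t := strings.SplitN(v, "=", 2)
        if len(t) == 2 {
            filters.Add(t[0], t[1])
        }
    }
    return filters
}
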
diff --git a/pkg/network/network.go b/pkg/network/network.go
index 526ee92d8..3ff664316 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -14,7 +14,7 @@ import (
)
// DefaultNetworkDriver is the default network type used
-var DefaultNetworkDriver string = "bridge"
+var DefaultNetworkDriver = "bridge"
// SupportedNetworkDrivers describes the list of supported drivers
var SupportedNetworkDrivers = []string{DefaultNetworkDriver}
diff --git a/pkg/parallel/parallel_linux.go b/pkg/parallel/parallel_linux.go
index e3f086c0e..472571972 100644
--- a/pkg/parallel/parallel_linux.go
+++ b/pkg/parallel/parallel_linux.go
@@ -9,11 +9,11 @@ import (
"github.com/sirupsen/logrus"
)
-// ParallelContainerOp performs the given function on the given set of
+// ContainerOp performs the given function on the given set of
// containers, using a number of parallel threads.
// If no error is returned, each container specified in ctrs will have an entry
// in the resulting map; containers with no error will be set to nil.
-func ParallelContainerOp(ctx context.Context, ctrs []*libpod.Container, applyFunc func(*libpod.Container) error) (map[*libpod.Container]error, error) {
+func ContainerOp(ctx context.Context, ctrs []*libpod.Container, applyFunc func(*libpod.Container) error) (map[*libpod.Container]error, error) {
jobControlLock.RLock()
defer jobControlLock.RUnlock()
@@ -22,7 +22,7 @@ func ParallelContainerOp(ctx context.Context, ctrs []*libpod.Container, applyFun
// The expectation is that most of the time is spent in applyFunc
// anyways.
var (
- errMap map[*libpod.Container]error = make(map[*libpod.Container]error)
+ errMap = make(map[*libpod.Container]error)
errLock sync.Mutex
allDone sync.WaitGroup
)
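
The renamed parallel.ContainerOp collects one result per container from concurrently running workers. A minimal sketch of the underlying pattern, generalized to strings; the real implementation additionally bounds concurrency with a job-control semaphore, which is omitted here:

package sketch

import "sync"

// parallelApply runs applyFunc on every item concurrently and records each
// item's error (nil on success) in a mutex-guarded map, mirroring the
// errMap/errLock/allDone trio in the hunk above.
func parallelApply(items []string, applyFunc func(string) error) map[string]error {
    var (
        errMap  = make(map[string]error)
        errLock sync.Mutex
        allDone sync.WaitGroup
    )
    for _, it := range items {
        allDone.Add(1)
        go func(it string) {
            defer allDone.Done()
            err := applyFunc(it)
            errLock.Lock()
            errMap[it] = err
            errLock.Unlock()
        }(it)
    }
    allDone.Wait()
    return errMap
}
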
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 2cf30a59e..e19c582b5 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -399,7 +399,7 @@ func AddPrivilegedDevices(g *generate.Generator) error {
return addPrivilegedDevices(g)
}
-func CreateContainerFromCreateConfig(r *libpod.Runtime, createConfig *CreateConfig, ctx context.Context, pod *libpod.Pod) (*libpod.Container, error) {
+func CreateContainerFromCreateConfig(ctx context.Context, r *libpod.Runtime, createConfig *CreateConfig, pod *libpod.Pod) (*libpod.Container, error) {
runtimeSpec, options, err := createConfig.MakeContainerConfig(r, pod)
if err != nil {
return nil, err
diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go
index 2c5891f9a..45179343b 100644
--- a/pkg/specgen/container_validate.go
+++ b/pkg/specgen/container_validate.go
@@ -10,7 +10,7 @@ import (
var (
// ErrInvalidSpecConfig describes an error that the given SpecGenerator is invalid
- ErrInvalidSpecConfig error = errors.New("invalid configuration")
+ ErrInvalidSpecConfig = errors.New("invalid configuration")
// SystemDValues describes the only values that SystemD can be
SystemDValues = []string{"true", "false", "always"}
// ImageVolumeModeValues describes the only values that ImageVolumeMode can be
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 74ae848af..33075b543 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -230,7 +230,7 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
options = append(options, libpod.WithPrivileged(s.Privileged))
// Get namespace related options
- namespaceOptions, err := GenerateNamespaceOptions(ctx, s, rt, pod, img)
+ namespaceOptions, err := namespaceOptions(ctx, s, rt, pod, img)
if err != nil {
return nil, err
}
diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go
index ffa96a5cf..e67afe1bf 100644
--- a/pkg/specgen/generate/namespaces.go
+++ b/pkg/specgen/generate/namespaces.go
@@ -72,13 +72,13 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod)
return toReturn, errors.Wrapf(define.ErrInvalidArg, "invalid namespace type %q passed", nsType)
}
-// GenerateNamespaceOptions generates container creation options for all
+// namespaceOptions generates container creation options for all
// namespaces in a SpecGenerator.
// Pod is the pod the container will join. May be nil if the container is not
// joining a pod.
// TODO: Consider grouping options that are not directly attached to a namespace
// elsewhere.
-func GenerateNamespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.Pod, img *image.Image) ([]libpod.CtrCreateOption, error) {
+func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.Pod, img *image.Image) ([]libpod.CtrCreateOption, error) {
toReturn := []libpod.CtrCreateOption{}
// If pod is not nil, get infra container.
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index cd2d69cfb..5ccb1ba80 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -93,5 +93,9 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithInfraContainerPorts(ports))
}
options = append(options, libpod.WithPodCgroups())
+ options = append(options, libpod.WithPodCreateCommand())
+ if len(p.InfraConmonPidFile) > 0 {
+ options = append(options, libpod.WithInfraConmonPidFile(p.InfraConmonPidFile))
+ }
return options, nil
}
diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go
index 640447e71..2d57cdb91 100644
--- a/pkg/specgen/pod_validate.go
+++ b/pkg/specgen/pod_validate.go
@@ -7,7 +7,7 @@ import (
var (
// ErrInvalidPodSpecConfig describes an error given when the podspecgenerator is invalid
- ErrInvalidPodSpecConfig error = errors.New("invalid pod spec")
+ ErrInvalidPodSpecConfig = errors.New("invalid pod spec")
// containerConfig has the default configurations defined in containers.conf
containerConfig = util.DefaultContainerConfig()
)
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 11976233a..600d27004 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -25,6 +25,9 @@ type PodBasicConfig struct {
// InfraCommand and InfraImages in this struct.
// Optional.
NoInfra bool `json:"no_infra,omitempty"`
+ // InfraConmonPidFile is a custom path to store the infra container's
+ // conmon PID.
+ InfraConmonPidFile string `json:"infra_conmon_pid_file,omitempty"`
// InfraCommand sets the command that will be used to start the infra
// container.
// If not set, the default set in the Libpod configuration file will be
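
The new InfraConmonPidFile field rides along in the pod spec JSON under the "infra_conmon_pid_file" key and is omitted when empty. A trimmed stand-in for specgen.PodBasicConfig showing only that behavior (the path below is illustrative):

package sketch

import "encoding/json"

// podBasicConfig is a reduced sketch of the spec struct above.
type podBasicConfig struct {
    Name               string `json:"name,omitempty"`
    InfraConmonPidFile string `json:"infra_conmon_pid_file,omitempty"`
}

// encodePodSpec marshals the spec; {Name: "web", InfraConmonPidFile:
// "/run/web-infra.pid"} produces
// {"name":"web","infra_conmon_pid_file":"/run/web-infra.pid"}.
func encodePodSpec(p podBasicConfig) ([]byte, error) {
    return json.Marshal(p)
}
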
diff --git a/pkg/systemd/generate/common.go b/pkg/systemd/generate/common.go
new file mode 100644
index 000000000..fe56dc874
--- /dev/null
+++ b/pkg/systemd/generate/common.go
@@ -0,0 +1,50 @@
+package generate
+
+import (
+ "github.com/pkg/errors"
+)
+
+// EnvVariable "PODMAN_SYSTEMD_UNIT" is set in all generated systemd units and
+// is set to the unit's (unique) name.
+const EnvVariable = "PODMAN_SYSTEMD_UNIT"
+
+// restartPolicies includes all valid restart policies to be used in a unit
+// file.
+var restartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"}
+
+// validateRestartPolicy checks that the user-provided policy is valid.
+func validateRestartPolicy(restart string) error {
+ for _, i := range restartPolicies {
+ if i == restart {
+ return nil
+ }
+ }
+ return errors.Errorf("%s is not a valid restart policy", restart)
+}
+
+const headerTemplate = `# {{.ServiceName}}.service
+# autogenerated by Podman {{.PodmanVersion}}
+{{- if .TimeStamp}}
+# {{.TimeStamp}}
+{{- end}}
+
+[Unit]
+Description=Podman {{.ServiceName}}.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+`
+
+// filterPodFlags removes --pod and --pod-id-file from the specified command.
+func filterPodFlags(command []string) []string {
+ processed := []string{}
+ for i := 0; i < len(command); i++ {
+ s := command[i]
+ if s == "--pod" || s == "--pod-id-file" {
+ i++
+ continue
+ }
+ processed = append(processed, s)
+ }
+ return processed
+}
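
A usage sketch for filterPodFlags (not part of the original change), assuming it sits alongside the function in package generate: both the flag and the value that follows it are dropped, because the loop advances i past the argument after "--pod" or "--pod-id-file".

package generate

import "fmt"

// exampleFilterPodFlags demonstrates the filtering on a sample create command.
func exampleFilterPodFlags() {
    cmd := []string{"podman", "run", "--pod", "mypod", "--name", "web", "nginx"}
    fmt.Println(filterPodFlags(cmd)) // [podman run --name web nginx]
}
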
diff --git a/pkg/systemd/generate/common_test.go b/pkg/systemd/generate/common_test.go
new file mode 100644
index 000000000..f53bb7828
--- /dev/null
+++ b/pkg/systemd/generate/common_test.go
@@ -0,0 +1,25 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFilterPodFlags(t *testing.T) {
+
+ tests := []struct {
+ input []string
+ }{
+ {[]string{"podman", "pod", "create"}},
+ {[]string{"podman", "pod", "create", "--name", "foo"}},
+ {[]string{"podman", "pod", "create", "--pod-id-file", "foo"}},
+ {[]string{"podman", "run", "--pod", "foo"}},
+ }
+
+ for _, test := range tests {
+ processed := filterPodFlags(test.input)
+ assert.NotContains(t, processed, "--pod-id-file")
+ assert.NotContains(t, processed, "--pod")
+ }
+}
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
new file mode 100644
index 000000000..dced1a3da
--- /dev/null
+++ b/pkg/systemd/generate/containers.go
@@ -0,0 +1,289 @@
+package generate
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// containerInfo contains data required for generating a container's systemd
+// unit file.
+type containerInfo struct {
+ // ServiceName of the systemd service.
+ ServiceName string
+ // Name or ID of the container.
+ ContainerNameOrID string
+ // StopTimeout sets the timeout Podman waits before killing the container
+ // during service stop.
+ StopTimeout uint
+ // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
+ RestartPolicy string
+ // PIDFile of the service. Required for forking services. Must point to the
+ // PID of the associated conmon process.
+ PIDFile string
+ // ContainerIDFile to be used in the unit.
+ ContainerIDFile string
+ // GenerateTimestamp, if set, adds a time stamp to the generated unit file.
+ GenerateTimestamp bool
+ // BoundToServices are the services this service binds to. Note that this
+ // service runs after them.
+ BoundToServices []string
+ // PodmanVersion for the header. Will be auto-filled with the running
+ // Podman version if left empty.
+ PodmanVersion string
+ // Executable is the path to the podman executable. Will be auto-filled if
+ // left empty.
+ Executable string
+ // TimeStamp at the time of creating the unit file. Will be set internally.
+ TimeStamp string
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string
+ // EnvVariable is generate.EnvVariable and must not be set.
+ EnvVariable string
+ // ExecStartPre of the unit.
+ ExecStartPre string
+ // ExecStart of the unit.
+ ExecStart string
+ // ExecStop of the unit.
+ ExecStop string
+ // ExecStopPost of the unit.
+ ExecStopPost string
+
+ // If not nil, the container is part of the pod. We can use the
+ // podInfo to extract the relevant data.
+ pod *podInfo
+}
+
+const containerTemplate = headerTemplate + `
+{{- if .BoundToServices}}
+RefuseManualStart=yes
+RefuseManualStop=yes
+BindsTo={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+After={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+{{- end}}
+
+[Service]
+Environment={{.EnvVariable}}=%n
+Restart={{.RestartPolicy}}
+{{- if .ExecStartPre}}
+ExecStartPre={{.ExecStartPre}}
+{{- end}}
+ExecStart={{.ExecStart}}
+ExecStop={{.ExecStop}}
+{{- if .ExecStopPost}}
+ExecStopPost={{.ExecStopPost}}
+{{- end}}
+PIDFile={{.PIDFile}}
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+// ContainerUnit generates a systemd unit for the specified container. Based
+// on the options, the return value might be the entire unit or a file it has
+// been written to.
+func ContainerUnit(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, error) {
+ info, err := generateContainerInfo(ctr, options)
+ if err != nil {
+ return "", err
+ }
+ return executeContainerTemplate(info, options)
+}
+
+func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSystemdOptions) (*containerInfo, error) {
+ timeout := ctr.StopTimeout()
+ if options.StopTimeout != nil {
+ timeout = *options.StopTimeout
+ }
+
+ config := ctr.Config()
+ conmonPidFile := config.ConmonPidFile
+ if conmonPidFile == "" {
+ return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ }
+
+ createCommand := []string{}
+ if config.CreateCommand != nil {
+ createCommand = config.CreateCommand
+ } else if options.New {
+ return nil, errors.Errorf("cannot use --new on container %q: no create command found", ctr.ID())
+ }
+
+ nameOrID, serviceName := containerServiceName(ctr, options)
+
+ info := containerInfo{
+ ServiceName: serviceName,
+ ContainerNameOrID: nameOrID,
+ RestartPolicy: options.RestartPolicy,
+ PIDFile: conmonPidFile,
+ StopTimeout: timeout,
+ GenerateTimestamp: true,
+ CreateCommand: createCommand,
+ }
+
+ return &info, nil
+}
+
+// containerServiceName returns the nameOrID and the service name of the
+// container.
+func containerServiceName(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, string) {
+ nameOrID := ctr.ID()
+ if options.Name {
+ nameOrID = ctr.Name()
+ }
+ serviceName := fmt.Sprintf("%s%s%s", options.ContainerPrefix, options.Separator, nameOrID)
+ return nameOrID, serviceName
+}
+
+// executeContainerTemplate executes the container template on the specified
+// containerInfo. Note that the containerInfo is also post-processed and
+// completed, which allows for easier unit testing.
+func executeContainerTemplate(info *containerInfo, options entities.GenerateSystemdOptions) (string, error) {
+ if err := validateRestartPolicy(info.RestartPolicy); err != nil {
+ return "", err
+ }
+
+ // Make sure the executable is set.
+ if info.Executable == "" {
+ executable, err := os.Executable()
+ if err != nil {
+ executable = "/usr/bin/podman"
+ logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
+ }
+ info.Executable = executable
+ }
+
+ info.EnvVariable = EnvVariable
+ info.ExecStart = "{{.Executable}} start {{.ContainerNameOrID}}"
+ info.ExecStop = "{{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.ContainerNameOrID}}"
+
+ // Assemble the ExecStart command when creating a new container.
+ //
+ // Note that we cannot catch all corner cases here such that users
+ // *must* manually check the generated files. A container might have
+ // been created via a Python script, which would certainly yield an
+ // invalid `info.CreateCommand`. Hence, we're doing a best effort unit
+ // generation and don't try aiming at completeness.
+ if options.New {
+ info.PIDFile = "%t/" + info.ServiceName + ".pid"
+ info.ContainerIDFile = "%t/" + info.ServiceName + ".ctr-id"
+ // The create command must at least have three arguments:
+ // /usr/bin/podman run $IMAGE
+ index := 2
+ if info.CreateCommand[1] == "container" {
+ index = 3
+ }
+ if len(info.CreateCommand) < index+1 {
+ return "", errors.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
+ }
+ // We're hard-coding the first five arguments and appending the
+ // CreateCommand stripped of its command and subcommand.
+ startCommand := []string{
+ info.Executable,
+ "run",
+ "--conmon-pidfile", "{{.PIDFile}}",
+ "--cidfile", "{{.ContainerIDFile}}",
+ "--cgroups=no-conmon",
+ }
+ // If the container is in a pod, make sure that the
+ // --pod-id-file is set correctly.
+ if info.pod != nil {
+ podFlags := []string{"--pod-id-file", info.pod.PodIDFile}
+ startCommand = append(startCommand, podFlags...)
+ info.CreateCommand = filterPodFlags(info.CreateCommand)
+ }
+
+ // Enforce detaching
+ //
+ // Since we use the systemd `Type=forking` service type
+ // (@see https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=),
+ // generating the unit file with the --new param makes `ExecStart`
+ // contain `/usr/bin/podman run ...`.
+ // If `info.CreateCommand` has no `-d` or `--detach` param,
+ // podman runs the container in the default attached mode and, as a
+ // result, `systemctl start` would wait for `podman run` to exit and eventually fail with a timeout error.
+ hasDetachParam := false
+ for _, p := range info.CreateCommand[index:] {
+ if p == "--detach" || p == "-d" {
+ hasDetachParam = true
+ }
+ }
+ if !hasDetachParam {
+ startCommand = append(startCommand, "-d")
+ }
+ startCommand = append(startCommand, info.CreateCommand[index:]...)
+
+ info.ExecStartPre = "/usr/bin/rm -f {{.PIDFile}} {{.ContainerIDFile}}"
+ info.ExecStart = strings.Join(startCommand, " ")
+ info.ExecStop = "{{.Executable}} stop --ignore --cidfile {{.ContainerIDFile}} {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}"
+ info.ExecStopPost = "{{.Executable}} rm --ignore -f --cidfile {{.ContainerIDFile}}"
+ }
+
+ if info.PodmanVersion == "" {
+ info.PodmanVersion = version.Version
+ }
+ if info.GenerateTimestamp {
+ info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
+ }
+
+ // Sort the slices to assure a deterministic output.
+ sort.Strings(info.BoundToServices)
+
+ // Generate the template and compile it.
+ //
+ // Note that we need a two-step generation process to allow for fields
+ // embedding other fields. This way we can replace `A -> B -> C` and
+ // make the code easier to maintain at the cost of a slightly slower
+ // generation. That's especially needed for embedding the PID and ID
+ // files in other fields which will eventually get replaced in the 2nd
+ // template execution.
+ templ, err := template.New("container_template").Parse(containerTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ var buf bytes.Buffer
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ // Now parse the generated template (i.e., buf) and execute it.
+ templ, err = template.New("container_template").Parse(buf.String())
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ buf = bytes.Buffer{}
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ if !options.Files {
+ return buf.String(), nil
+ }
+
+ buf.WriteByte('\n')
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrap(err, "error getting current working directory")
+ }
+ path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
+ if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
+ return "", errors.Wrap(err, "error generating systemd unit")
+ }
+ return path, nil
+}
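To illustrate the two-step template expansion described in the comments above, here is a self-contained sketch (unit text and field names invented, not the real containerInfo): the first pass leaves nested template actions in place, the second pass resolves them.

    package main

    import (
        "bytes"
        "fmt"
        "text/template"
    )

    type info struct {
        PIDFile   string
        ExecStart string
    }

    func main() {
        // ExecStart itself still contains a template action referencing another field.
        i := info{
            PIDFile:   "%t/demo.pid",
            ExecStart: "/usr/bin/podman run --conmon-pidfile {{.PIDFile}} alpine",
        }
        const unit = "ExecStart={{.ExecStart}}\nPIDFile={{.PIDFile}}\n"

        render := func(text string) string {
            t := template.Must(template.New("unit").Parse(text))
            var buf bytes.Buffer
            if err := t.Execute(&buf, i); err != nil {
                panic(err)
            }
            return buf.String()
        }

        // The second pass resolves the actions the first pass copied verbatim.
        fmt.Print(render(render(unit)))
    }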
diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go
new file mode 100644
index 000000000..8365ecd7a
--- /dev/null
+++ b/pkg/systemd/generate/containers_test.go
@@ -0,0 +1,366 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func TestValidateRestartPolicyContainer(t *testing.T) {
+ type containerInfo struct {
+ restart string
+ }
+ tests := []struct {
+ name string
+ containerInfo containerInfo
+ wantErr bool
+ }{
+ {"good-on", containerInfo{restart: "no"}, false},
+ {"good-on-success", containerInfo{restart: "on-success"}, false},
+ {"good-on-failure", containerInfo{restart: "on-failure"}, false},
+ {"good-on-abnormal", containerInfo{restart: "on-abnormal"}, false},
+ {"good-on-watchdog", containerInfo{restart: "on-watchdog"}, false},
+ {"good-on-abort", containerInfo{restart: "on-abort"}, false},
+ {"good-always", containerInfo{restart: "always"}, false},
+ {"fail", containerInfo{restart: "foobar"}, true},
+ {"failblank", containerInfo{restart: ""}, true},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateRestartPolicy(test.containerInfo.restart); (err != nil) != test.wantErr {
+ t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
+ }
+ })
+ }
+}
+
+func TestCreateContainerSystemdUnit(t *testing.T) {
+ goodID := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
+ExecStop=/usr/bin/podman stop -t 10 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodName := `# container-foobar.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-foobar.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start foobar
+ExecStop=/usr/bin/podman stop -t 10 foobar
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameBoundTo := `# container-foobar.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-foobar.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+RefuseManualStart=yes
+RefuseManualStop=yes
+BindsTo=a.service b.service c.service pod.service
+After=a.service b.service c.service pod.service
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start foobar
+ExecStop=/usr/bin/podman stop -t 10 foobar
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameNew := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameNewWithPodFile := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --pod-id-file /tmp/pod-foobar.pod-id-file -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+ goodNameNewDetach := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodIDNew := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id --cgroups=no-conmon -d awesome-image:latest
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id -t 10
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
+PIDFile=%t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ tests := []struct {
+ name string
+ info containerInfo
+ want string
+ new bool
+ wantErr bool
+ }{
+
+ {"good with id",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ ContainerNameOrID: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ goodID,
+ false,
+ false,
+ },
+ {"good with name",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-foobar",
+ ContainerNameOrID: "foobar",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ goodName,
+ false,
+ false,
+ },
+ {"good with name and bound to",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-foobar",
+ ContainerNameOrID: "foobar",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ BoundToServices: []string{"pod", "a", "b", "c"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameBoundTo,
+ false,
+ false,
+ },
+ {"bad restart policy",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "never",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ "",
+ false,
+ true,
+ },
+ {"good with name and generic",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNew,
+ true,
+ false,
+ },
+ {"good with explicit short detach param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNew,
+ true,
+ false,
+ },
+ {"good with explicit short detach param and podInfo",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ pod: &podInfo{
+ PodIDFile: "/tmp/pod-foobar.pod-id-file",
+ },
+ },
+ goodNameNewWithPodFile,
+ true,
+ false,
+ },
+ {"good with explicit full detach param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "--detach", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNewDetach,
+ true,
+ false,
+ },
+ {"good with id and no param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ ContainerNameOrID: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "awesome-image:latest"},
+ EnvVariable: EnvVariable,
+ },
+ goodIDNew,
+ true,
+ false,
+ },
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ opts := entities.GenerateSystemdOptions{
+ Files: false,
+ New: test.new,
+ }
+ got, err := executeContainerTemplate(&test.info, opts)
+ if (err != nil) != test.wantErr {
+ t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
+ return
+ }
+ if got != test.want {
+ t.Errorf("CreateContainerSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
+ }
+ })
+ }
+}
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
new file mode 100644
index 000000000..5cfd5ab0a
--- /dev/null
+++ b/pkg/systemd/generate/pods.go
@@ -0,0 +1,341 @@
+package generate
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// podInfo contains data required for generating a pod's systemd
+// unit file.
+type podInfo struct {
+ // ServiceName of the systemd service.
+ ServiceName string
+ // Name or ID of the infra container.
+ InfraNameOrID string
+ // StopTimeout sets the timeout Podman waits before killing the container
+ // during service stop.
+ StopTimeout uint
+ // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
+ RestartPolicy string
+ // PIDFile of the service. Required for forking services. Must point to the
+ // PID of the associated conmon process.
+ PIDFile string
+ // PodIDFile of the unit.
+ PodIDFile string
+ // GenerateTimestamp, if set, adds a time stamp to the generated unit file.
+ GenerateTimestamp bool
+ // RequiredServices are services this service requires. Note that this
+ // service runs before them.
+ RequiredServices []string
+ // PodmanVersion for the header. Will be auto-filled with the running
+ // Podman version if left empty.
+ PodmanVersion string
+ // Executable is the path to the podman executable. Will be auto-filled if
+ // left empty.
+ Executable string
+ // TimeStamp at the time of creating the unit file. Will be set internally.
+ TimeStamp string
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string
+ // PodCreateCommand - a post-processed variant of CreateCommand to use
+ // when creating the pod.
+ PodCreateCommand string
+ // EnvVariable is generate.EnvVariable and must not be set.
+ EnvVariable string
+ // ExecStartPre1 of the unit.
+ ExecStartPre1 string
+ // ExecStartPre2 of the unit.
+ ExecStartPre2 string
+ // ExecStart of the unit.
+ ExecStart string
+ // ExecStop of the unit.
+ ExecStop string
+ // ExecStopPost of the unit.
+ ExecStopPost string
+}
+
+const podTemplate = headerTemplate + `Requires={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+
+[Service]
+Environment={{.EnvVariable}}=%n
+Restart={{.RestartPolicy}}
+{{- if .ExecStartPre1}}
+ExecStartPre={{.ExecStartPre1}}
+{{- end}}
+{{- if .ExecStartPre2}}
+ExecStartPre={{.ExecStartPre2}}
+{{- end}}
+ExecStart={{.ExecStart}}
+ExecStop={{.ExecStop}}
+{{- if .ExecStopPost}}
+ExecStopPost={{.ExecStopPost}}
+{{- end}}
+PIDFile={{.PIDFile}}
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+// PodUnits generates systemd units for the specified pod and its containers.
+// Based on the options, the return value might be the content of all units or
+// the files they have been written to.
+func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, error) {
+ // Error out if the pod has no infra container, which we require to be the
+ // main service.
+ if !pod.HasInfraContainer() {
+ return "", errors.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
+ }
+
+ podInfo, err := generatePodInfo(pod, options)
+ if err != nil {
+ return "", err
+ }
+
+ infraID, err := pod.InfraContainerID()
+ if err != nil {
+ return "", err
+ }
+
+ // Compute the container-dependency graph for the Pod.
+ containers, err := pod.AllContainers()
+ if err != nil {
+ return "", err
+ }
+ if len(containers) == 0 {
+ return "", errors.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
+ }
+ graph, err := libpod.BuildContainerGraph(containers)
+ if err != nil {
+ return "", err
+ }
+
+ // Traverse the dependency graph and create a containerInfo for
+ // each container.
+ containerInfos := []*containerInfo{}
+ for ctr, dependencies := range graph.DependencyMap() {
+ // Skip the infra container as we already generated it.
+ if ctr.ID() == infraID {
+ continue
+ }
+ ctrInfo, err := generateContainerInfo(ctr, options)
+ if err != nil {
+ return "", err
+ }
+ // Now add the container's dependencies and add the container as a
+ // required service of the infra container.
+ for _, dep := range dependencies {
+ if dep.ID() == infraID {
+ ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName)
+ } else {
+ _, serviceName := containerServiceName(dep, options)
+ ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName)
+ }
+ }
+ podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName)
+ containerInfos = append(containerInfos, ctrInfo)
+ }
+
+ // Now generate the systemd service for all containers.
+ builder := strings.Builder{}
+ out, err := executePodTemplate(podInfo, options)
+ if err != nil {
+ return "", err
+ }
+ builder.WriteString(out)
+ for _, info := range containerInfos {
+ info.pod = podInfo
+ builder.WriteByte('\n')
+ out, err := executeContainerTemplate(info, options)
+ if err != nil {
+ return "", err
+ }
+ builder.WriteString(out)
+ }
+
+ return builder.String(), nil
+}
+
+func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (*podInfo, error) {
+ // Generate the podInfo from the pod's infra container. This info
+ // acts as the main service of the pod.
+ infraCtr, err := pod.InfraContainer()
+ if err != nil {
+ return nil, errors.Wrap(err, "could not find infra container")
+ }
+
+ timeout := infraCtr.StopTimeout()
+ if options.StopTimeout != nil {
+ timeout = *options.StopTimeout
+ }
+
+ config := infraCtr.Config()
+ conmonPidFile := config.ConmonPidFile
+ if conmonPidFile == "" {
+ return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ }
+
+ createCommand := pod.CreateCommand()
+ if options.New && len(createCommand) == 0 {
+ return nil, errors.Errorf("cannot use --new on pod %q: no create command found", pod.ID())
+ }
+
+ nameOrID := pod.ID()
+ ctrNameOrID := infraCtr.ID()
+ if options.Name {
+ nameOrID = pod.Name()
+ ctrNameOrID = infraCtr.Name()
+ }
+ serviceName := fmt.Sprintf("%s%s%s", options.PodPrefix, options.Separator, nameOrID)
+
+ info := podInfo{
+ ServiceName: serviceName,
+ InfraNameOrID: ctrNameOrID,
+ RestartPolicy: options.RestartPolicy,
+ PIDFile: conmonPidFile,
+ StopTimeout: timeout,
+ GenerateTimestamp: true,
+ CreateCommand: createCommand,
+ }
+ return &info, nil
+}
+
+// executePodTemplate executes the pod template on the specified podInfo. Note
+// that the podInfo is also post-processed and completed, which allows for
+// easier unit testing.
+func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions) (string, error) {
+ if err := validateRestartPolicy(info.RestartPolicy); err != nil {
+ return "", err
+ }
+
+ // Make sure the executable is set.
+ if info.Executable == "" {
+ executable, err := os.Executable()
+ if err != nil {
+ executable = "/usr/bin/podman"
+ logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
+ }
+ info.Executable = executable
+ }
+
+ info.EnvVariable = EnvVariable
+ info.ExecStart = "{{.Executable}} start {{.InfraNameOrID}}"
+ info.ExecStop = "{{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.InfraNameOrID}}"
+
+ // Assemble the ExecStart command when creating a new pod.
+ //
+ // Note that we cannot catch all corner cases here such that users
+ // *must* manually check the generated files. A pod might have been
+ // created via a Python script, which would certainly yield an invalid
+ // `info.CreateCommand`. Hence, we're doing a best effort unit
+ // generation and don't try aiming at completeness.
+ if options.New {
+ info.PIDFile = "%t/" + info.ServiceName + ".pid"
+ info.PodIDFile = "%t/" + info.ServiceName + ".pod-id"
+
+ podCreateIndex := 0
+ var podRootArgs, podCreateArgs []string
+ switch len(info.CreateCommand) {
+ case 0, 1, 2:
+ return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ default:
+ // Make sure that the pod was created with `pod create` and
+ // not something else, such as `run --pod new`.
+ for i := 1; i < len(info.CreateCommand); i++ {
+ if info.CreateCommand[i-1] == "pod" && info.CreateCommand[i] == "create" {
+ podCreateIndex = i
+ break
+ }
+ }
+ if podCreateIndex == 0 {
+ return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ }
+ podRootArgs = info.CreateCommand[1 : podCreateIndex-1]
+ podCreateArgs = filterPodFlags(info.CreateCommand[podCreateIndex+1:])
+ }
+ // We're hard-coding the first five arguments and appending the
+ // CreateCommand stripped of its command and subcommand.
+ startCommand := []string{info.Executable}
+ startCommand = append(startCommand, podRootArgs...)
+ startCommand = append(startCommand,
+ []string{"pod", "create",
+ "--infra-conmon-pidfile", "{{.PIDFile}}",
+ "--pod-id-file", "{{.PodIDFile}}"}...)
+
+ startCommand = append(startCommand, podCreateArgs...)
+
+ info.ExecStartPre1 = "/usr/bin/rm -f {{.PIDFile}} {{.PodIDFile}}"
+ info.ExecStartPre2 = strings.Join(startCommand, " ")
+ info.ExecStart = "{{.Executable}} pod start --pod-id-file {{.PodIDFile}}"
+ info.ExecStop = "{{.Executable}} pod stop --ignore --pod-id-file {{.PodIDFile}} {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}"
+ info.ExecStopPost = "{{.Executable}} pod rm --ignore -f --pod-id-file {{.PodIDFile}}"
+ }
+ if info.PodmanVersion == "" {
+ info.PodmanVersion = version.Version
+ }
+ if info.GenerateTimestamp {
+ info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
+ }
+
+ // Sort the slices to assure a deterministic output.
+ sort.Strings(info.RequiredServices)
+
+ // Generate the template and compile it.
+ //
+ // Note that we need a two-step generation process to allow for fields
+ // embedding other fields. This way we can replace `A -> B -> C` and
+ // make the code easier to maintain at the cost of a slightly slower
+ // generation. That's especially needed for embedding the PID and ID
+ // files in other fields which will eventually get replaced in the 2nd
+ // template execution.
+ templ, err := template.New("pod_template").Parse(podTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ var buf bytes.Buffer
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ // Now parse the generated template (i.e., buf) and execute it.
+ templ, err = template.New("pod_template").Parse(buf.String())
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ buf = bytes.Buffer{}
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ if !options.Files {
+ return buf.String(), nil
+ }
+
+ buf.WriteByte('\n')
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrap(err, "error getting current working directory")
+ }
+ path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
+ if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
+ return "", errors.Wrap(err, "error generating systemd unit")
+ }
+ return path, nil
+}
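A hypothetical shell sketch of exercising this code path end to end (pod and container names invented; consult `podman generate systemd --help` for the flags available in your build):

    # create a pod with an infra container plus one member container
    podman pod create --name mypod
    podman create --pod mypod --name myweb alpine top

    # emit one unit per service (the pod's infra unit and one per container);
    # --files writes them to the current directory instead of stdout
    podman generate systemd --name --files mypod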
diff --git a/pkg/systemd/generate/pods_test.go b/pkg/systemd/generate/pods_test.go
new file mode 100644
index 000000000..f6e225c35
--- /dev/null
+++ b/pkg/systemd/generate/pods_test.go
@@ -0,0 +1,100 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func TestValidateRestartPolicyPod(t *testing.T) {
+ type podInfo struct {
+ restart string
+ }
+ tests := []struct {
+ name string
+ podInfo podInfo
+ wantErr bool
+ }{
+ {"good-on", podInfo{restart: "no"}, false},
+ {"good-on-success", podInfo{restart: "on-success"}, false},
+ {"good-on-failure", podInfo{restart: "on-failure"}, false},
+ {"good-on-abnormal", podInfo{restart: "on-abnormal"}, false},
+ {"good-on-watchdog", podInfo{restart: "on-watchdog"}, false},
+ {"good-on-abort", podInfo{restart: "on-abort"}, false},
+ {"good-always", podInfo{restart: "always"}, false},
+ {"fail", podInfo{restart: "foobar"}, true},
+ {"failblank", podInfo{restart: ""}, true},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateRestartPolicy(test.podInfo.restart); (err != nil) != test.wantErr {
+ t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
+ }
+ })
+ }
+}
+
+func TestCreatePodSystemdUnit(t *testing.T) {
+ podGoodName := `# pod-123abc.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman pod-123abc.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+Requires=container-1.service container-2.service
+Before=container-1.service container-2.service
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start jadda-jadda-infra
+ExecStop=/usr/bin/podman stop -t 10 jadda-jadda-infra
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ tests := []struct {
+ name string
+ info podInfo
+ want string
+ wantErr bool
+ }{
+ {"pod",
+ podInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "pod-123abc",
+ InfraNameOrID: "jadda-jadda-infra",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ RequiredServices: []string{"container-1", "container-2"},
+ },
+ podGoodName,
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ opts := entities.GenerateSystemdOptions{
+ Files: false,
+ }
+ got, err := executePodTemplate(&test.info, opts)
+ if (err != nil) != test.wantErr {
+ t.Errorf("CreatePodSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
+ return
+ }
+ if got != test.want {
+ t.Errorf("CreatePodSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
+ }
+ })
+ }
+}
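Assuming a standard Go toolchain, the new generator tests above can be run from the repository root with a command along these lines:

    go test ./pkg/systemd/generate/...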
diff --git a/pkg/systemd/generate/systemdgen.go b/pkg/systemd/generate/systemdgen.go
deleted file mode 100644
index 73fe52c0e..000000000
--- a/pkg/systemd/generate/systemdgen.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package generate
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "text/template"
- "time"
-
- "github.com/containers/libpod/version"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// EnvVariable "PODMAN_SYSTEMD_UNIT" is set in all generated systemd units and
-// is set to the unit's (unique) name.
-const EnvVariable = "PODMAN_SYSTEMD_UNIT"
-
-// ContainerInfo contains data required for generating a container's systemd
-// unit file.
-type ContainerInfo struct {
- // ServiceName of the systemd service.
- ServiceName string
- // Name or ID of the container.
- ContainerName string
- // InfraContainer of the pod.
- InfraContainer string
- // StopTimeout sets the timeout Podman waits before killing the container
- // during service stop.
- StopTimeout uint
- // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
- RestartPolicy string
- // PIDFile of the service. Required for forking services. Must point to the
- // PID of the associated conmon process.
- PIDFile string
- // GenerateTimestamp, if set the generated unit file has a time stamp.
- GenerateTimestamp bool
- // BoundToServices are the services this service binds to. Note that this
- // service runs after them.
- BoundToServices []string
- // RequiredServices are services this service requires. Note that this
- // service runs before them.
- RequiredServices []string
- // PodmanVersion for the header. Will be set internally. Will be auto-filled
- // if left empty.
- PodmanVersion string
- // Executable is the path to the podman executable. Will be auto-filled if
- // left empty.
- Executable string
- // TimeStamp at the time of creating the unit file. Will be set internally.
- TimeStamp string
- // New controls if a new container is created or if an existing one is started.
- New bool
- // CreateCommand is the full command plus arguments of the process the
- // container has been created with.
- CreateCommand []string
- // RunCommand is a post-processed variant of CreateCommand and used for
- // the ExecStart field in generic unit files.
- RunCommand string
- // EnvVariable is generate.EnvVariable and must not be set.
- EnvVariable string
-}
-
-var restartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"}
-
-// validateRestartPolicy checks that the user-provided policy is valid.
-func validateRestartPolicy(restart string) error {
- for _, i := range restartPolicies {
- if i == restart {
- return nil
- }
- }
- return errors.Errorf("%s is not a valid restart policy", restart)
-}
-
-const containerTemplate = `# {{.ServiceName}}.service
-# autogenerated by Podman {{.PodmanVersion}}
-{{- if .TimeStamp}}
-# {{.TimeStamp}}
-{{- end}}
-
-[Unit]
-Description=Podman {{.ServiceName}}.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-{{- if .BoundToServices}}
-RefuseManualStart=yes
-RefuseManualStop=yes
-BindsTo={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-After={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-{{- end}}
-{{- if .RequiredServices}}
-Requires={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-{{- end}}
-
-[Service]
-Environment={{.EnvVariable}}=%n
-Restart={{.RestartPolicy}}
-{{- if .New}}
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart={{.RunCommand}}
-ExecStop={{.Executable}} stop --ignore --cidfile %t/%n-cid {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}
-ExecStopPost={{.Executable}} rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-{{- else}}
-ExecStart={{.Executable}} start {{.ContainerName}}
-ExecStop={{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.ContainerName}}
-PIDFile={{.PIDFile}}
-{{- end}}
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
-// Options include different options to control the unit file generation.
-type Options struct {
- // When set, generate service files in the current working directory and
- // return the paths to these files instead of returning all contents in one
- // big string.
- Files bool
- // New controls if a new container is created or if an existing one is started.
- New bool
-}
-
-// CreateContainerSystemdUnit creates a systemd unit file for a container.
-func CreateContainerSystemdUnit(info *ContainerInfo, opts Options) (string, error) {
- if err := validateRestartPolicy(info.RestartPolicy); err != nil {
- return "", err
- }
-
- // Make sure the executable is set.
- if info.Executable == "" {
- executable, err := os.Executable()
- if err != nil {
- executable = "/usr/bin/podman"
- logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
- }
- info.Executable = executable
- }
-
- info.EnvVariable = EnvVariable
-
- // Assemble the ExecStart command when creating a new container.
- //
- // Note that we cannot catch all corner cases here such that users
- // *must* manually check the generated files. A container might have
- // been created via a Python script, which would certainly yield an
- // invalid `info.CreateCommand`. Hence, we're doing a best effort unit
- // generation and don't try aiming at completeness.
- if opts.New {
- // The create command must at least have three arguments:
- // /usr/bin/podman run $IMAGE
- index := 2
- if info.CreateCommand[1] == "container" {
- index = 3
- }
- if len(info.CreateCommand) < index+1 {
- return "", errors.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
- }
- // We're hard-coding the first five arguments and append the
- // CreateCommand with a stripped command and subcomand.
- command := []string{
- info.Executable,
- "run",
- "--conmon-pidfile", "%t/%n-pid",
- "--cidfile", "%t/%n-cid",
- "--cgroups=no-conmon",
- }
-
- // Enforce detaching
- //
- // since we use systemd `Type=forking` service
- // @see https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
- // when we generated systemd service file with the --new param,
- // `ExecStart` will have `/usr/bin/podman run ...`
- // if `info.CreateCommand` has no `-d` or `--detach` param,
- // podman will run the container in default attached mode,
- // as a result, `systemd start` will wait the `podman run` command exit until failed with timeout error.
- hasDetachParam := false
- for _, p := range info.CreateCommand[index:] {
- if p == "--detach" || p == "-d" {
- hasDetachParam = true
- }
- }
- if !hasDetachParam {
- command = append(command, "-d")
- }
-
- command = append(command, info.CreateCommand[index:]...)
- info.RunCommand = strings.Join(command, " ")
- info.New = true
- }
-
- if info.PodmanVersion == "" {
- info.PodmanVersion = version.Version
- }
- if info.GenerateTimestamp {
- info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
- }
-
- // Sort the slices to assure a deterministic output.
- sort.Strings(info.RequiredServices)
- sort.Strings(info.BoundToServices)
-
- // Generate the template and compile it.
- templ, err := template.New("systemd_service_file").Parse(containerTemplate)
- if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
- }
-
- var buf bytes.Buffer
- if err := templ.Execute(&buf, info); err != nil {
- return "", err
- }
-
- if !opts.Files {
- return buf.String(), nil
- }
-
- buf.WriteByte('\n')
- cwd, err := os.Getwd()
- if err != nil {
- return "", errors.Wrap(err, "error getting current working directory")
- }
- path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
- if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
- return "", errors.Wrap(err, "error generating systemd unit")
- }
- return path, nil
-}
diff --git a/pkg/systemd/generate/systemdgen_test.go b/pkg/systemd/generate/systemdgen_test.go
deleted file mode 100644
index 3269405a6..000000000
--- a/pkg/systemd/generate/systemdgen_test.go
+++ /dev/null
@@ -1,347 +0,0 @@
-package generate
-
-import (
- "testing"
-)
-
-func TestValidateRestartPolicy(t *testing.T) {
- type ContainerInfo struct {
- restart string
- }
- tests := []struct {
- name string
- ContainerInfo ContainerInfo
- wantErr bool
- }{
- {"good-on", ContainerInfo{restart: "no"}, false},
- {"good-on-success", ContainerInfo{restart: "on-success"}, false},
- {"good-on-failure", ContainerInfo{restart: "on-failure"}, false},
- {"good-on-abnormal", ContainerInfo{restart: "on-abnormal"}, false},
- {"good-on-watchdog", ContainerInfo{restart: "on-watchdog"}, false},
- {"good-on-abort", ContainerInfo{restart: "on-abort"}, false},
- {"good-always", ContainerInfo{restart: "always"}, false},
- {"fail", ContainerInfo{restart: "foobar"}, true},
- {"failblank", ContainerInfo{restart: ""}, true},
- }
- for _, tt := range tests {
- test := tt
- t.Run(tt.name, func(t *testing.T) {
- if err := validateRestartPolicy(test.ContainerInfo.restart); (err != nil) != test.wantErr {
- t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
- }
- })
- }
-}
-
-func TestCreateContainerSystemdUnit(t *testing.T) {
- goodID := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
-ExecStop=/usr/bin/podman stop -t 10 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodName := `# container-foobar.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-foobar.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start foobar
-ExecStop=/usr/bin/podman stop -t 10 foobar
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameBoundTo := `# container-foobar.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-foobar.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-RefuseManualStart=yes
-RefuseManualStop=yes
-BindsTo=a.service b.service c.service pod.service
-After=a.service b.service c.service pod.service
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start foobar
-ExecStop=/usr/bin/podman stop -t 10 foobar
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- podGoodName := `# pod-123abc.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman pod-123abc.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-Requires=container-1.service container-2.service
-Before=container-1.service container-2.service
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start jadda-jadda-infra
-ExecStop=/usr/bin/podman stop -t 10 jadda-jadda-infra
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameNew := `# jadda-jadda.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman jadda-jadda.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameNewDetach := `# jadda-jadda.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman jadda-jadda.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodIdNew := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon -d awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- tests := []struct {
- name string
- info ContainerInfo
- want string
- wantErr bool
- }{
-
- {"good with id",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- ContainerName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- goodID,
- false,
- },
- {"good with name",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-foobar",
- ContainerName: "foobar",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- goodName,
- false,
- },
- {"good with name and bound to",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-foobar",
- ContainerName: "foobar",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- BoundToServices: []string{"pod", "a", "b", "c"},
- },
- goodNameBoundTo,
- false,
- },
- {"pod",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "pod-123abc",
- ContainerName: "jadda-jadda-infra",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- RequiredServices: []string{"container-1", "container-2"},
- },
- podGoodName,
- false,
- },
- {"bad restart policy",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "never",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- "",
- true,
- },
- {"good with name and generic",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNew,
- false,
- },
- {"good with explicit short detach param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNew,
- false,
- },
- {"good with explicit full detach param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "--detach", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNewDetach,
- false,
- },
- {"good with id and no param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- ContainerName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "awesome-image:latest"},
- },
- goodIdNew,
- false,
- },
- }
- for _, tt := range tests {
- test := tt
- t.Run(tt.name, func(t *testing.T) {
- opts := Options{
- Files: false,
- New: test.info.New,
- }
- got, err := CreateContainerSystemdUnit(&test.info, opts)
- if (err != nil) != test.wantErr {
- t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
- return
- }
- if got != test.want {
- t.Errorf("CreateContainerSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
- }
- })
- }
-}
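The table above exercises CreateContainerSystemdUnit for named containers, pods, and the --new path. A hedged sketch of how the generator is driven outside the test table, assuming pkg/systemd/generate exports the ContainerInfo, Options, and CreateContainerSystemdUnit names used above (the import path and the PID-file path below are illustrative placeholders, not taken from the patch):

```go
package main

import (
	"fmt"
	"log"

	generate "github.com/containers/libpod/pkg/systemd/generate"
)

func main() {
	info := generate.ContainerInfo{
		Executable:    "/usr/bin/podman",
		ServiceName:   "container-foobar",
		ContainerName: "foobar",
		RestartPolicy: "always",
		PIDFile:       "/run/containers/conmon.pid", // placeholder path
		StopTimeout:   10,
		PodmanVersion: "CI",
	}
	unit, err := generate.CreateContainerSystemdUnit(&info, generate.Options{Files: false, New: false})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(unit) // rendered unit text, compared against the `want` fixtures in the tests
}
```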
diff --git a/pkg/trust/config.go b/pkg/trust/config.go
index 0bafc722b..164df2a90 100644
--- a/pkg/trust/config.go
+++ b/pkg/trust/config.go
@@ -1,7 +1,7 @@
package trust
-// Trust Policy describes a basic trust policy configuration
-type TrustPolicy struct {
+// Policy describes a basic trust policy configuration
+type Policy struct {
Name string `json:"name"`
RepoName string `json:"repo_name,omitempty"`
Keys []string `json:"keys,omitempty"`
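Renaming TrustPolicy to Policy removes the package-name stutter: callers now read trust.Policy instead of trust.TrustPolicy. A minimal sketch of decoding an entry into the renamed type, assuming the repository's import path for pkg/trust and only the fields visible in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/libpod/pkg/trust"
)

func main() {
	raw := []byte(`{"name":"registry.example.com","repo_name":"registry.example.com/ns","keys":["/etc/pki/containers/key.gpg"]}`)
	var p trust.Policy
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name, p.RepoName, p.Keys)
}
```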
diff --git a/pkg/varlinkapi/create.go b/pkg/varlinkapi/create.go
index 571ce6115..d921130e7 100644
--- a/pkg/varlinkapi/create.go
+++ b/pkg/varlinkapi/create.go
@@ -220,7 +220,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
}
}
- ctr, err := CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod)
+ ctr, err := CreateContainerFromCreateConfig(ctx, runtime, createConfig, pod)
if err != nil {
return nil, nil, err
}
@@ -909,7 +909,7 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return config, nil
}
-func CreateContainerFromCreateConfig(r *libpod.Runtime, createConfig *cc.CreateConfig, ctx context.Context, pod *libpod.Pod) (*libpod.Container, error) {
+func CreateContainerFromCreateConfig(ctx context.Context, r *libpod.Runtime, createConfig *cc.CreateConfig, pod *libpod.Pod) (*libpod.Container, error) {
runtimeSpec, options, err := createConfig.MakeContainerConfig(r, pod)
if err != nil {
return nil, err
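Both hunks in this file make the same adjustment: context.Context moves to the front of the parameter list, matching the common Go convention that a Context, when present, is the first argument. The call site in CreateContainer is updated in lockstep, so behavior is unchanged.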
diff --git a/pkg/varlinkapi/volumes.go b/pkg/varlinkapi/volumes.go
index aa0eb1fb5..3b6276287 100644
--- a/pkg/varlinkapi/volumes.go
+++ b/pkg/varlinkapi/volumes.go
@@ -25,7 +25,7 @@ func (i *VarlinkAPI) VolumeCreate(call iopodman.VarlinkCall, options iopodman.Vo
volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(options.Labels))
}
if len(options.Options) > 0 {
- parsedOptions, err := parse.ParseVolumeOptions(options.Options)
+ parsedOptions, err := parse.VolumeOptions(options.Options)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
diff --git a/test/dockerpy/README.md b/test/dockerpy/README.md
index 2894fc8ab..32e426d58 100644
--- a/test/dockerpy/README.md
+++ b/test/dockerpy/README.md
@@ -6,11 +6,6 @@ Running tests
=============
To run the tests locally in your sandbox:
-#### Make sure that the Podman system service is running to do so
-
-```
-sudo podman --log-level=debug system service -t0 unix:/run/podman/podman.sock
-```
#### Run the entire test
```
diff --git a/test/dockerpy/__init__.py b/test/dockerpy/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/dockerpy/__init__.py
diff --git a/test/dockerpy/common.py b/test/dockerpy/common.py
index 767a94ec0..fdacb49be 100644
--- a/test/dockerpy/common.py
+++ b/test/dockerpy/common.py
@@ -1,6 +1,68 @@
import docker
+import subprocess
+import os
+import sys
+import time
from docker import Client
+from . import constant
+alpineDict = {
+ "name": "docker.io/library/alpine:latest",
+ "shortName": "alpine",
+ "tarballName": "alpine.tar"}
def get_client():
- return docker.Client(base_url="unix:/run/podman/podman.sock")
+ client = docker.Client(base_url="http://localhost:8080",timeout=15)
+ return client
+
+client = get_client()
+
+def podman():
+ binary = os.getenv("PODMAN_BINARY")
+ if binary is None:
+ binary = "bin/podman"
+ return binary
+
+def restore_image_from_cache():
+ client.load_image(constant.ImageCacheDir+alpineDict["tarballName"])
+
+def run_top_container():
+ client.pull(constant.ALPINE)
+ c = client.create_container(constant.ALPINE,name=constant.TOP)
+ client.start(container=c.get("Id"))
+
+def enable_sock(TestClass):
+ TestClass.podman = subprocess.Popen(
+ [
+ podman(), "system", "service", "tcp:localhost:8080",
+ "--log-level=debug", "--time=0"
+ ],
+ shell=False,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ time.sleep(2)
+
+def terminate_connection(TestClass):
+ TestClass.podman.terminate()
+ stdout, stderr = TestClass.podman.communicate(timeout=0.5)
+ if stdout:
+ print("\nService Stdout:\n" + stdout.decode('utf-8'))
+ if stderr:
+ print("\nService Stderr:\n" + stderr.decode('utf-8'))
+
+ if TestClass.podman.returncode > 0:
+ sys.stderr.write("podman exited with error code {}\n".format(
+ TestClass.podman.returncode))
+ sys.exit(2)
+
+def remove_all_containers():
+ containers = client.containers(quiet=True)
+ for c in containers:
+ client.remove_container(container=c.get("Id"), force=True)
+
+def remove_all_images():
+ allImages = client.images()
+ for image in allImages:
+ client.remove_image(image,force=True)
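With these helpers, the docker-py tests no longer depend on a manually started service (hence the README section removed above): enable_sock launches "podman system service" on tcp:localhost:8080 with --time=0 from setUpClass, the client in get_client points at http://localhost:8080, and terminate_connection shuts the service down and surfaces its output when it exited with an error. The podman binary defaults to bin/podman and can be overridden via the PODMAN_BINARY environment variable.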
diff --git a/test/dockerpy/constant.py b/test/dockerpy/constant.py
index e00457442..8a3f1d984 100644
--- a/test/dockerpy/constant.py
+++ b/test/dockerpy/constant.py
@@ -9,3 +9,5 @@ ALPINEAMD64ID = "961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e
ALPINEARM64DIGEST = "docker.io/library/alpine@sha256:db7f3dcef3d586f7dd123f107c93d7911515a5991c4b9e51fa2a43e46335a43e"
ALPINEARM64ID = "915beeae46751fc564998c79e73a1026542e945ca4f73dc841d09ccc6c2c0672"
infra = "k8s.gcr.io/pause:3.2"
+TOP = "top"
+ImageCacheDir = "/tmp/podman/imagecachedir"
diff --git a/test/dockerpy/containers.py b/test/dockerpy/containers.py
new file mode 100644
index 000000000..d70ec932c
--- /dev/null
+++ b/test/dockerpy/containers.py
@@ -0,0 +1,46 @@
+
+import unittest
+import docker
+import requests
+import os
+from docker import Client
+from . import constant
+from . import common
+
+client = common.get_client()
+
+class TestContainers(unittest.TestCase):
+
+ podman = None
+
+ def setUp(self):
+ super().setUp()
+ common.run_top_container()
+
+ def tearDown(self):
+ common.remove_all_containers()
+ common.remove_all_images()
+ return super().tearDown()
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ common.enable_sock(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ common.terminate_connection(cls)
+ return super().tearDownClass()
+
+ def test_inspect_container(self):
+ # Inspect bogus container
+ with self.assertRaises(requests.HTTPError):
+ client.inspect_container("dummy")
+ # Inspect valid container
+ container = client.inspect_container(constant.TOP)
+ self.assertIn(constant.TOP , container["Name"])
+
+
+if __name__ == '__main__':
+ # Setup temporary space
+ unittest.main()
diff --git a/test/dockerpy/images.py b/test/dockerpy/images.py
index 07ea6c0f8..1e07d25c7 100644
--- a/test/dockerpy/images.py
+++ b/test/dockerpy/images.py
@@ -11,19 +11,29 @@ client = common.get_client()
class TestImages(unittest.TestCase):
+ podman = None
def setUp(self):
super().setUp()
client.pull(constant.ALPINE)
def tearDown(self):
- allImages = client.images()
- for image in allImages:
- client.remove_image(image,force=True)
+ common.remove_all_images()
return super().tearDown()
-# Inspect Image
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ common.enable_sock(cls)
+
+
+ @classmethod
+ def tearDownClass(cls):
+ common.terminate_connection(cls)
+ return super().tearDownClass()
+# Inspect Image
+
def test_inspect_image(self):
# Check for error with wrong image name
with self.assertRaises(requests.HTTPError):
@@ -79,8 +89,8 @@ class TestImages(unittest.TestCase):
for i in response:
# Alpine found
if "docker.io/library/alpine" in i["Name"]:
- self.assertTrue(True, msg="Image found")
- self.assertFalse(False,msg="Image not found")
+                self.assertTrue("docker.io/library/alpine" in i["Name"], msg="Image found")
# Image Exist (No docker-py support yet)
@@ -105,19 +115,22 @@ class TestImages(unittest.TestCase):
alpine_image = client.inspect_image(constant.ALPINE)
for h in imageHistory:
if h["Id"] in alpine_image["Id"]:
- self.assertTrue(True,msg="Image History validated")
- self.assertFalse(False,msg="Unable to get image history")
+                self.assertIn(h["Id"], alpine_image["Id"], msg="Image history validated")
# Prune Image (No docker-py support yet)
# Export Image
def test_export_image(self):
- file = "/tmp/alpine-latest.tar"
+ client.pull(constant.BB)
+ file = os.path.join(constant.ImageCacheDir , "busybox.tar")
+ if not os.path.exists(constant.ImageCacheDir):
+ os.makedirs(constant.ImageCacheDir)
# Check for error with wrong image name
with self.assertRaises(requests.HTTPError):
client.get_image("dummy")
- response = client.get_image(constant.ALPINE)
+ response = client.get_image(constant.BB)
image_tar = open(file,mode="wb")
image_tar.write(response.data)
image_tar.close()
@@ -125,6 +138,13 @@ class TestImages(unittest.TestCase):
# Import|Load Image
+ def test_import_image(self):
+ allImages = client.images()
+ self.assertEqual(len(allImages), 1)
+ file = os.path.join(constant.ImageCacheDir , "busybox.tar")
+ client.import_image_from_file(filename=file)
+ allImages = client.images()
+ self.assertEqual(len(allImages), 2)
if __name__ == '__main__':
# Setup temporary space
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index e6a3d2f7a..663205209 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -232,6 +232,8 @@ var _ = Describe("Podman checkpoint", func() {
})
It("podman checkpoint container with established tcp connections", func() {
+ // Broken on Ubuntu.
+ SkipIfNotFedora()
localRunString := getRunString([]string{redis})
session := podmanTest.Podman(localRunString)
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/cp_test.go b/test/e2e/cp_test.go
index f95f8646c..6ae54ba34 100644
--- a/test/e2e/cp_test.go
+++ b/test/e2e/cp_test.go
@@ -141,6 +141,8 @@ var _ = Describe("Podman cp", func() {
})
It("podman cp stdin/stdout", func() {
+ SkipIfRemote()
+ Skip("Looks like SkipIfRemote() is not working")
session := podmanTest.Podman([]string{"create", ALPINE, "ls", "foo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index f40472a7c..b9a1ff83d 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -2,6 +2,7 @@ package integration
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
@@ -221,6 +222,42 @@ var _ = Describe("Podman create", func() {
Expect(match).To(BeTrue())
})
+ It("podman create --pod-id-file", func() {
+ // First, make sure that --pod and --pod-id-file yield an error
+ // if used together.
+ session := podmanTest.Podman([]string{"create", "--pod", "foo", "--pod-id-file", "bar", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+		podName := "rudolph"
+ ctrName := "prancer"
+ podIDFile := tmpDir + "pod-id-file"
+
+ // Now, let's create a pod with --pod-id-file.
+ session = podmanTest.Podman([]string{"pod", "create", "--pod-id-file", podIDFile, "--name", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "inspect", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+ podData := session.InspectPodToJSON()
+
+ // Finally we can create a container with --pod-id-file and do
+ // some checks to make sure it's working as expected.
+ session = podmanTest.Podman([]string{"create", "--pod-id-file", podIDFile, "--name", ctrName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ ctrJSON := podmanTest.InspectContainer(ctrName)
+ Expect(podData.ID).To(Equal(ctrJSON[0].Pod)) // Make sure the container's pod matches the pod's ID
+ })
+
It("podman run entrypoint and cmd test", func() {
name := "test101"
create := podmanTest.Podman([]string{"create", "--name", name, redis})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 389f2c822..7872a9fbf 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -254,6 +254,8 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman generate with user and reimport kube on pod", func() {
+ // This test fails on ubuntu due to https://github.com/seccomp/containers-golang/pull/27
+ SkipIfNotFedora()
podName := "toppod"
_, rc, _ := podmanTest.CreatePod(podName)
Expect(rc).To(Equal(0))
@@ -280,7 +282,8 @@ var _ = Describe("Podman generate kube", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ // container name in pod is <podName>-<ctrName>
+ inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "toppod-test1"})
inspect1.WaitWithDefaultTimeout()
Expect(inspect1.ExitCode()).To(Equal(0))
Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString()))
@@ -293,6 +296,7 @@ var _ = Describe("Podman generate kube", func() {
// we need a container name because IDs don't persist after rm/play
ctrName := "test-ctr"
+ ctrNameInKubePod := "test1-test-ctr"
session1 := podmanTest.Podman([]string{"run", "-d", "--pod", "new:test1", "--name", ctrName, "-v", vol1 + ":/volume/:z", "alpine", "top"})
session1.WaitWithDefaultTimeout()
@@ -311,7 +315,7 @@ var _ = Describe("Podman generate kube", func() {
play.WaitWithDefaultTimeout()
Expect(play.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", ctrName})
+ inspect := podmanTest.Podman([]string{"inspect", ctrNameInKubePod})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(vol1))
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index d5ae441e2..497e8f71e 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -3,6 +3,7 @@
package integration
import (
+ "io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
@@ -191,7 +192,7 @@ var _ = Describe("Podman generate systemd", func() {
found, _ := session.GrepString("# container-foo.service")
Expect(found).To(BeTrue())
- found, _ = session.GrepString("stop --ignore --cidfile %t/%n-cid -t 42")
+ found, _ = session.GrepString("stop --ignore --cidfile %t/container-foo.ctr-id -t 42")
Expect(found).To(BeTrue())
})
@@ -230,7 +231,7 @@ var _ = Describe("Podman generate systemd", func() {
session := podmanTest.Podman([]string{"generate", "systemd", "--time", "42", "--name", "--new", "foo"})
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(125))
+ Expect(session.ExitCode()).To(Equal(0))
})
It("podman generate systemd --container-prefix con", func() {
@@ -325,4 +326,49 @@ var _ = Describe("Podman generate systemd", func() {
found, _ = session.GrepString("BindsTo=p_foo.service")
Expect(found).To(BeTrue())
})
+
+ It("podman generate systemd pod with containers --new", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ n := podmanTest.Podman([]string{"pod", "create", "--pod-id-file", tmpFile, "--name", "foo"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-2", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--new", "--name", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# pod-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("Requires=container-foo-1.service container-foo-2.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("BindsTo=pod-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod create --infra-conmon-pidfile %t/pod-foo.pid --pod-id-file %t/pod-foo.pod-id --name foo")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("ExecStartPre=/usr/bin/rm -f %t/pod-foo.pid %t/pod-foo.pod-id")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod stop --ignore --pod-id-file %t/pod-foo.pod-id -t 10")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod rm --ignore -f --pod-id-file %t/pod-foo.pod-id")
+ Expect(found).To(BeTrue())
+ })
})
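The new test pins down the shape of the units that "generate systemd --new" produces for a pod: a pod-foo.service that Requires= the two container units, container units that BindsTo= the pod unit, and generated "pod create" / "pod stop" / "pod rm" commands that operate via --pod-id-file under systemd's runtime directory (%t), preceded by an ExecStartPre that removes stale pid and pod-id files.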
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 9daf266b8..7fe4ce967 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -3,6 +3,7 @@
package integration
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
@@ -14,7 +15,18 @@ import (
. "github.com/onsi/gomega"
)
-var yamlTemplate = `
+var unknownKindYaml = `
+apiVersion: v1
+kind: UnknownKind
+metadata:
+ labels:
+ app: app1
+ name: unknown
+spec:
+ hostname: unknown
+`
+
+var podYamlTemplate = `
apiVersion: v1
kind: Pod
metadata:
@@ -77,31 +89,137 @@ spec:
status: {}
`
+var deploymentYamlTemplate = `
+apiVersion: v1
+kind: Deployment
+metadata:
+ creationTimestamp: "2019-07-17T14:44:08Z"
+ labels:
+ app: {{ .Name }}
+ name: {{ .Name }}
+{{ with .Annotations }}
+ annotations:
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+{{ end }}
+
+spec:
+ replicas: {{ .Replicas }}
+ selector:
+ matchLabels:
+ app: {{ .Name }}
+ template:
+ {{ with .PodTemplate }}
+ metadata:
+ labels:
+ app: {{ .Name }}
+ {{ with .Annotations }}
+ annotations:
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+ {{ end }}
+ spec:
+ hostname: {{ .Hostname }}
+ containers:
+ {{ with .Ctrs }}
+ {{ range . }}
+ - command:
+ {{ range .Cmd }}
+ - {{.}}
+ {{ end }}
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: HOSTNAME
+ - name: container
+ value: podman
+ image: {{ .Image }}
+ name: {{ .Name }}
+ imagePullPolicy: {{ .PullPolicy }}
+ resources: {}
+ {{ if .SecurityContext }}
+ securityContext:
+ allowPrivilegeEscalation: true
+ {{ if .Caps }}
+ capabilities:
+ {{ with .CapAdd }}
+ add:
+ {{ range . }}
+ - {{.}}
+ {{ end }}
+ {{ end }}
+ {{ with .CapDrop }}
+ drop:
+ {{ range . }}
+ - {{.}}
+ {{ end }}
+ {{ end }}
+ {{ end }}
+ privileged: false
+ readOnlyRootFilesystem: false
+ workingDir: /
+ {{ end }}
+ {{ end }}
+ {{ end }}
+ {{ end }}
+`
+
var (
- defaultCtrName = "testCtr"
- defaultCtrCmd = []string{"top"}
- defaultCtrImage = ALPINE
- defaultPodName = "testPod"
- seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
+ defaultCtrName = "testCtr"
+ defaultCtrCmd = []string{"top"}
+ defaultCtrImage = ALPINE
+ defaultPodName = "testPod"
+ defaultDeploymentName = "testDeployment"
+ seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
)
-func generateKubeYaml(pod *Pod, fileName string) error {
+func writeYaml(content string, fileName string) error {
f, err := os.Create(fileName)
if err != nil {
return err
}
defer f.Close()
- t, err := template.New("pod").Parse(yamlTemplate)
+ _, err = f.WriteString(content)
if err != nil {
return err
}
- if err := t.Execute(f, pod); err != nil {
+ return nil
+}
+
+func generatePodKubeYaml(pod *Pod, fileName string) error {
+ templateBytes := &bytes.Buffer{}
+
+ t, err := template.New("pod").Parse(podYamlTemplate)
+ if err != nil {
return err
}
- return nil
+ if err := t.Execute(templateBytes, pod); err != nil {
+ return err
+ }
+
+ return writeYaml(templateBytes.String(), fileName)
+}
+
+func generateDeploymentKubeYaml(deployment *Deployment, fileName string) error {
+ templateBytes := &bytes.Buffer{}
+
+ t, err := template.New("deployment").Parse(deploymentYamlTemplate)
+ if err != nil {
+ return err
+ }
+
+ if err := t.Execute(templateBytes, deployment); err != nil {
+ return err
+ }
+
+ return writeYaml(templateBytes.String(), fileName)
}
// Pod describes the options a kube yaml can be configured at pod level
@@ -146,6 +264,59 @@ func withAnnotation(k, v string) podOption {
}
}
+// Deployment describes the options a kube yaml can be configured at deployment level
+type Deployment struct {
+ Name string
+ Replicas int32
+ Annotations map[string]string
+ PodTemplate *Pod
+}
+
+func getDeployment(options ...deploymentOption) *Deployment {
+ d := Deployment{defaultDeploymentName, 1, make(map[string]string), getPod()}
+ for _, option := range options {
+ option(&d)
+ }
+
+ return &d
+}
+
+type deploymentOption func(*Deployment)
+
+func withDeploymentAnnotation(k, v string) deploymentOption {
+ return func(deployment *Deployment) {
+ deployment.Annotations[k] = v
+ }
+}
+
+func withPod(pod *Pod) deploymentOption {
+ return func(d *Deployment) {
+ d.PodTemplate = pod
+ }
+}
+
+func withReplicas(replicas int32) deploymentOption {
+ return func(d *Deployment) {
+ d.Replicas = replicas
+ }
+}
+
+// getPodNamesInDeployment returns a list of Pod objects
+// with just their name set, so that it can be passed around
+// and into getCtrNameInPod for ease of testing
+func getPodNamesInDeployment(d *Deployment) []Pod {
+ var pods []Pod
+ var i int32
+
+ for i = 0; i < d.Replicas; i++ {
+ p := Pod{}
+ p.Name = fmt.Sprintf("%s-pod-%d", d.Name, i)
+ pods = append(pods, p)
+ }
+
+ return pods
+}
+
// Ctr describes the options a kube yaml can be configured at container level
type Ctr struct {
Name string
@@ -208,6 +379,10 @@ func withPullPolicy(policy string) ctrOption {
}
}
+func getCtrNameInPod(pod *Pod) string {
+ return fmt.Sprintf("%s-%s", pod.Name, defaultCtrName)
+}
+
var _ = Describe("Podman generate kube", func() {
var (
tempdir string
@@ -234,8 +409,18 @@ var _ = Describe("Podman generate kube", func() {
processTestResult(f)
})
+ It("podman play kube fail with yaml of unsupported kind", func() {
+ err := writeYaml(unknownKindYaml, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Not(Equal(0)))
+
+ })
+
It("podman play kube fail with nonexist authfile", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ err := generatePodKubeYaml(getPod(), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", "--authfile", "/tmp/nonexist", kubeYaml})
@@ -245,14 +430,15 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman play kube test correct command", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ pod := getPod()
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
@@ -261,33 +447,34 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct output", func() {
p := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}))))
- err := generateKubeYaml(p, kubeYaml)
+ err := generatePodKubeYaml(p, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(p)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("hello"))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "'{{ .Config.Cmd }}'"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(p), "--format", "'{{ .Config.Cmd }}'"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring("hello"))
})
It("podman play kube test hostname", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ pod := getPod()
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "{{ .Config.Hostname }}"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .Config.Hostname }}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(Equal(defaultPodName))
@@ -295,14 +482,15 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test with customized hostname", func() {
hostname := "myhostname"
- err := generateKubeYaml(getPod(withHostname(hostname)), kubeYaml)
+ pod := getPod(withHostname(hostname))
+		err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "{{ .Config.Hostname }}"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .Config.Hostname }}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(Equal(hostname))
@@ -312,14 +500,15 @@ var _ = Describe("Podman generate kube", func() {
capAdd := "CAP_SYS_ADMIN"
ctr := getCtr(withCapAdd([]string{capAdd}), withCmd([]string{"cat", "/proc/self/status"}))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ pod := getPod(withCtr(ctr))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(capAdd))
@@ -329,14 +518,15 @@ var _ = Describe("Podman generate kube", func() {
capDrop := "CAP_CHOWN"
ctr := getCtr(withCapDrop([]string{capDrop}))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ pod := getPod(withCtr(ctr))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(capDrop))
@@ -344,14 +534,15 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube no security context", func() {
// expect play kube to not fail if no security context is specified
- err := generateKubeYaml(getPod(withCtr(getCtr(withSecurityContext(false)))), kubeYaml)
+ pod := getPod(withCtr(getCtr(withSecurityContext(false))))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
})
@@ -367,7 +558,8 @@ var _ = Describe("Podman generate kube", func() {
ctrAnnotation := "container.seccomp.security.alpha.kubernetes.io/" + defaultCtrName
ctr := getCtr(withCmd([]string{"pwd"}))
- err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile))), kubeYaml)
+ pod := getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile)))
+ err = generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -375,7 +567,7 @@ var _ = Describe("Podman generate kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(pod)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
@@ -392,7 +584,8 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}))
- err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile))), kubeYaml)
+ pod := getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile)))
+ err = generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -400,7 +593,7 @@ var _ = Describe("Podman generate kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(pod)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
@@ -408,7 +601,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of never should be 125", func() {
ctr := getCtr(withPullPolicy("never"), withImage(BB_GLIBC))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -418,7 +611,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of missing", func() {
ctr := getCtr(withPullPolicy("missing"), withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -444,7 +637,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withPullPolicy("always"), withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -475,7 +668,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -519,7 +712,7 @@ spec:
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", "demo_kube"})
+ inspect := podmanTest.Podman([]string{"inspect", "demo_pod-demo_kube"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
@@ -529,4 +722,41 @@ spec:
Expect(ctr[0].Config.Labels["key1"]).To(ContainSubstring("value1"))
Expect(ctr[0].Config.StopSignal).To(Equal(uint(51)))
})
+
+ // Deployment related tests
+ It("podman play kube deployment 1 replica test correct command", func() {
+ deployment := getDeployment()
+ err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ podNames := getPodNamesInDeployment(deployment)
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(&podNames[0])})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
+ })
+
+ It("podman play kube deployment more than 1 replica test correct command", func() {
+ var i, numReplicas int32
+ numReplicas = 5
+ deployment := getDeployment(withReplicas(numReplicas))
+ err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ podNames := getPodNamesInDeployment(deployment)
+ for i = 0; i < numReplicas; i++ {
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(&podNames[i])})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
+ }
+ })
})
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index 8040adf1e..f1acd3750 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -57,4 +57,26 @@ var _ = Describe("Podman pod inspect", func() {
podData := inspect.InspectPodToJSON()
Expect(podData.ID).To(Equal(podid))
})
+
+ It("podman pod inspect (CreateCommand)", func() {
+ podName := "myTestPod"
+ createCommand := []string{"pod", "create", "--name", podName, "--hostname", "rudolph", "--share", "net"}
+
+ // Create the pod.
+ session := podmanTest.Podman(createCommand)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+		// Inspect the pod and make sure that the recorded create command
+		// matches exactly how we created the pod.
+ inspect := podmanTest.Podman([]string{"pod", "inspect", podName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.IsJSONOutputValid()).To(BeTrue())
+ podData := inspect.InspectPodToJSON()
+ // Let's get the last len(createCommand) items in the command.
+ inspectCreateCommand := podData.CreateCommand
+ index := len(inspectCreateCommand) - len(createCommand)
+ Expect(inspectCreateCommand[index:]).To(Equal(createCommand))
+ })
})
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index 4060e1268..d0ece7b53 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -2,6 +2,7 @@ package integration
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -229,4 +230,72 @@ var _ = Describe("Podman pod rm", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
+
+ It("podman pod start/remove single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+
+ session = podmanTest.Podman([]string{"pod", "rm", "--pod-id-file", tmpFile, "--force"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman pod start/remove multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+
+ cmd = []string{"pod", "rm", "--force"}
+ cmd = append(cmd, podIDFiles...)
+ session = podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
})
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 8e78cadfd..d7d623d6e 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -1,7 +1,11 @@
package integration
import (
+ "fmt"
+ "io/ioutil"
"os"
+ "strconv"
+ "strings"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
@@ -136,4 +140,94 @@ var _ = Describe("Podman pod start", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman pod start single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+ })
+
+ It("podman pod start multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+ })
+
+	It("podman pod create --infra-conmon-pidfile + start", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+		// Create a pod with --infra-conmon-pidfile.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--infra-conmon-pidfile", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) // infra
+
+ readFirstLine := func(path string) string {
+ content, err := ioutil.ReadFile(path)
+ Expect(err).To(BeNil())
+ return strings.Split(string(content), "\n")[0]
+ }
+
+ // Read the infra-conmon-pidfile and perform some sanity checks
+ // on the pid.
+ infraConmonPID := readFirstLine(tmpFile)
+ _, err = strconv.Atoi(infraConmonPID) // Make sure it's a proper integer
+ Expect(err).To(BeNil())
+
+ cmdline := readFirstLine(fmt.Sprintf("/proc/%s/cmdline", infraConmonPID))
+ Expect(cmdline).To(ContainSubstring("/conmon"))
+ })
+
})
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index 0a46b07c9..0fe580921 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -1,6 +1,7 @@
package integration
import (
+ "io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
@@ -175,4 +176,72 @@ var _ = Describe("Podman pod stop", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman pod start/stop single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+
+ session = podmanTest.Podman([]string{"pod", "stop", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman pod start/stop multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+
+ cmd = []string{"pod", "stop"}
+ cmd = append(cmd, podIDFiles...)
+ session = podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 9db2f5d49..4fad85f00 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -129,6 +129,32 @@ var _ = Describe("Podman run networking", func() {
Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("127.0.0.1"))
})
+ It("podman run -p [::1]:8080:80/udp", func() {
+ name := "testctr"
+ session := podmanTest.Podman([]string{"create", "-t", "-p", "[::1]:8080:80/udp", "--name", name, ALPINE, "/bin/sh"})
+ session.WaitWithDefaultTimeout()
+ inspectOut := podmanTest.InspectContainer(name)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports)).To(Equal(1))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostPort).To(Equal(int32(8080)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].ContainerPort).To(Equal(int32(80)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].Protocol).To(Equal("udp"))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("::1"))
+ })
+
+ It("podman run -p [::1]:8080:80/tcp", func() {
+ name := "testctr"
+ session := podmanTest.Podman([]string{"create", "-t", "-p", "[::1]:8080:80/tcp", "--name", name, ALPINE, "/bin/sh"})
+ session.WaitWithDefaultTimeout()
+ inspectOut := podmanTest.InspectContainer(name)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports)).To(Equal(1))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostPort).To(Equal(int32(8080)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].ContainerPort).To(Equal(int32(80)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].Protocol).To(Equal("tcp"))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("::1"))
+ })
+
It("podman run --expose 80 -P", func() {
name := "testctr"
session := podmanTest.Podman([]string{"create", "-t", "--expose", "80", "-P", "--name", name, ALPINE, "/bin/sh"})
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index d0b56c7f6..9657ecb69 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -269,6 +269,9 @@ type EngineConfig struct {
	// RemoteURI contains connection information used to connect to a remote system.
RemoteURI string `toml:"remote_uri,omitempty"`
+ // Identity key file for RemoteURI
+ RemoteIdentity string `toml:"remote_identity,omitempty"`
+
// RuntimePath is the path to OCI runtime binary for launching containers.
	// The first path pointing to a valid file will be used. This is used only
// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
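The new RemoteIdentity field (serialized as remote_identity) records the identity key file to use alongside RemoteURI when connecting to a remote system.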
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
index 50d56ffbf..52b111d5f 100644
--- a/vendor/github.com/json-iterator/go/README.md
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -1,5 +1,5 @@
[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
@@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu
Raw Result (easyjson requires static code generation)
-| | ns/op | allocation bytes | allocation times |
-| --- | --- | --- | --- |
-| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
-| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
-| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
-| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
-| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
-| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+| | ns/op | allocation bytes | allocation times |
+| --------------- | ----------- | ---------------- | ---------------- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
-Always benchmark with your own workload.
+Always benchmark with your own workload.
The result depends heavily on the data input.
# Usage
@@ -41,10 +41,10 @@ import "encoding/json"
json.Marshal(&data)
```
-with
+with
```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Marshal(&data)
@@ -60,7 +60,7 @@ json.Unmarshal(input, &data)
with
```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Unmarshal(input, &data)
@@ -78,10 +78,10 @@ go get github.com/json-iterator/go
Contributors
-* [thockin](https://github.com/thockin)
-* [mattn](https://github.com/mattn)
-* [cch123](https://github.com/cch123)
-* [Oleg Shaldybin](https://github.com/olegshaldybin)
-* [Jason Toffaletti](https://github.com/toffaletti)
+- [thockin](https://github.com/thockin)
+- [mattn](https://github.com/mattn)
+- [cch123](https://github.com/cch123)
+- [Oleg Shaldybin](https://github.com/olegshaldybin)
+- [Jason Toffaletti](https://github.com/toffaletti)
Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
index a4b93c78c..1f12f6612 100644
--- a/vendor/github.com/json-iterator/go/any_str.go
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 {
flag := 1
startPos := 0
- endPos := 0
if any.val[0] == '+' || any.val[0] == '-' {
startPos = 1
}
@@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 {
flag = -1
}
+ endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
@@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 {
}
startPos := 0
- endPos := 0
if any.val[0] == '-' {
return 0
@@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 {
startPos = 1
}
+ endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
index 8c58fcba5..2adcdc3b7 100644
--- a/vendor/github.com/json-iterator/go/config.go
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
rawMessage := *(*json.RawMessage)(ptr)
iter := cfg.BorrowIterator([]byte(rawMessage))
+ defer cfg.ReturnIterator(iter)
iter.Read()
- if iter.Error != nil {
+ if iter.Error != nil && iter.Error != io.EOF {
stream.WriteRaw("null")
} else {
- cfg.ReturnIterator(iter)
stream.WriteRaw(string(rawMessage))
}
}, func(ptr unsafe.Pointer) bool {
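For context, the vendored fix above keeps the borrowed iterator alive via defer and no longer treats a bare io.EOF as a validation failure when checking a json.RawMessage. A hedged sketch of the observable behavior, assuming jsoniter's ConfigCompatibleWithStandardLibrary (which enables raw-message validation): a well-formed raw message passes through unchanged, while a malformed one is replaced with null.

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.ConfigCompatibleWithStandardLibrary
	type payload struct {
		Raw json.RawMessage `json:"raw"`
	}
	good, _ := api.Marshal(payload{Raw: json.RawMessage(`{"a":1}`)})
	bad, _ := api.Marshal(payload{Raw: json.RawMessage(`{"a":`)}) // malformed on purpose
	fmt.Println(string(good)) // {"raw":{"a":1}}
	fmt.Println(string(bad))  // {"raw":null}
}
```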
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
index b65137114..58ee89c84 100644
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
- iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
@@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
- iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index 80320cd64..74a97bfe5 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole
fieldNames = []string{tagProvidedFieldName}
}
// private?
- isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+ isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
if isNotExported {
fieldNames = []string{}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 9e2b623fe..582967130 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
return decoder
}
}
+
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+
switch typ.Kind() {
case reflect.String:
return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
default:
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(unmarshalerType) {
- return &unmarshalerDecoder{
- valType: typ,
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(textUnmarshalerType) {
- return &textUnmarshalerDecoder{
- valType: typ,
- }
- }
return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
}
}
@@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
return encoder
}
}
+
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+
switch typ.Kind() {
case reflect.String:
return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
default:
- if typ == textMarshalerType {
- return &directTextMarshalerEncoder{
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
- if typ.Implements(textMarshalerType) {
- return &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
if typ.Kind() == reflect.Interface {
return &dynamicMapKeyEncoder{ctx, typ}
}
@@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if c == '}' {
return
}
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
- return
- }
iter.unreadByte()
key := decoder.keyType.UnsafeNew()
decoder.keyDecoder.Decode(key, iter)
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
index 43ec71d6d..fa71f4748 100644
--- a/vendor/github.com/json-iterator/go/reflect_optional.go
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -2,7 +2,6 @@ package jsoniter
import (
"github.com/modern-go/reflect2"
- "reflect"
"unsafe"
)
@@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
ptrType := typ.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
decoder := decoderOfType(ctx, elemType)
- if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
- return &dereferenceDecoder{elemType, decoder}
- }
return &OptionalDecoder{elemType, decoder}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index 5ad5cc561..d7eb0eb5c 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
if c != '}' {
@@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
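
The repeated struct-decoder hunks above all add the same guard: reflect's Name() is empty for anonymous struct types, and the guard stops the error wrapper from prefixing messages with an empty type name. A small standalone illustration of that property:

package main

import (
	"fmt"
	"reflect"
)

type Named struct{ A int }

func main() {
	// Anonymous struct types report an empty Name(), which is what the
	// len(decoder.typ.Type1().Name()) != 0 guard checks for.
	fmt.Printf("%q\n", reflect.TypeOf(struct{ A int }{}).Name()) // ""
	fmt.Printf("%q\n", reflect.TypeOf(Named{}).Name())           // "Named"
}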
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
index 17662fded..23d8a3ad6 100644
--- a/vendor/github.com/json-iterator/go/stream.go
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -103,14 +103,14 @@ func (stream *Stream) Flush() error {
if stream.Error != nil {
return stream.Error
}
- n, err := stream.out.Write(stream.buf)
+ _, err := stream.out.Write(stream.buf)
if err != nil {
if stream.Error == nil {
stream.Error = err
}
return err
}
- stream.buf = stream.buf[n:]
+ stream.buf = stream.buf[:0]
return nil
}
@@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() {
func (stream *Stream) WriteMore() {
stream.writeByte(',')
stream.writeIndention(0)
- stream.Flush()
}
// WriteArrayStart write [ with possible indention
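
The stream.go hunks above change Flush to reset the buffer with buf[:0] instead of re-slicing past the written bytes, and drop the implicit Flush from WriteMore. A minimal sketch of the slice semantics involved; buf stands in for stream.buf:

package main

import "fmt"

func main() {
	buf := make([]byte, 0, 8)
	buf = append(buf, "abcd"...)

	// Old behaviour: re-slice past the flushed bytes, shrinking the usable
	// capacity of the buffer on every flush.
	tail := buf[len(buf):]
	fmt.Println(len(tail), cap(tail)) // 0 4

	// New behaviour: keep the whole backing array and reuse it from the start.
	reset := buf[:0]
	fmt.Println(len(reset), cap(reset)) // 0 8
}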
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index b4c46042b..49370eb16 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -6,7 +6,6 @@
package assert
import (
- io "io"
http "net/http"
url "net/url"
time "time"
@@ -202,11 +201,11 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...)
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -215,11 +214,11 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...)
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 9bea8d189..9db889427 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -6,7 +6,6 @@
package assert
import (
- io "io"
http "net/http"
url "net/url"
time "time"
@@ -386,11 +385,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args .
// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
@@ -399,11 +398,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...)
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPBodyNotContains asserts that a specified handler returns a
@@ -412,11 +411,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string,
// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -425,11 +424,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...)
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPError asserts that a specified handler returns an error status code.
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 30ef7cc06..4ed341dd2 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -2,7 +2,6 @@ package assert
import (
"fmt"
- "io"
"net/http"
"net/http/httptest"
"net/url"
@@ -112,13 +111,9 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
-func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values, body io.Reader) string {
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
-
- if values != nil {
- url = url + "?" + values.Encode()
- }
- req, err := http.NewRequest(method, url, body)
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return ""
}
@@ -132,13 +127,13 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values, b
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- httpBody := HTTPBody(handler, method, url, values, body)
+ body := HTTPBody(handler, method, url, values)
- contains := strings.Contains(httpBody, fmt.Sprint(str))
+ contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
@@ -152,13 +147,13 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- httpBody := HTTPBody(handler, method, url, values, body)
+ body := HTTPBody(handler, method, url, values)
- contains := strings.Contains(httpBody, fmt.Sprint(str))
+ contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
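
The testify hunks above vendor a version of HTTPBodyContains and friends without the io.Reader request-body parameter. A minimal usage sketch against that signature; the handler and test function are hypothetical:

package example

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// handler is a hypothetical http.HandlerFunc used only to exercise the helper.
func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "I'm Feeling Lucky")
}

func TestBodyContains(t *testing.T) {
	// Vendored signature: handler, method, url, query values, expected
	// substring -- no request body argument.
	assert.HTTPBodyContains(t, handler, "GET", "/search", nil, "I'm Feeling Lucky")
}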
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 693648f8a..ec4624b28 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -7,7 +7,6 @@ package require
import (
assert "github.com/stretchr/testify/assert"
- io "io"
http "net/http"
url "net/url"
time "time"
@@ -489,11 +488,11 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyContains(t, handler, method, url, values, body, str, msgAndArgs...) {
+ if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
return
}
t.FailNow()
@@ -505,11 +504,11 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyContainsf(t, handler, method, url, values, body, str, msg, args...) {
+ if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
return
}
t.FailNow()
@@ -521,11 +520,11 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyNotContains(t, handler, method, url, values, body, str, msgAndArgs...) {
+ if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
return
}
t.FailNow()
@@ -537,11 +536,11 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyNotContainsf(t, handler, method, url, values, body, str, msg, args...) {
+ if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
return
}
t.FailNow()
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 84fc1c88d..103d7dcb6 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -7,7 +7,6 @@ package require
import (
assert "github.com/stretchr/testify/assert"
- io "io"
http "net/http"
url "net/url"
time "time"
@@ -387,11 +386,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args .
// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
+ HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
@@ -400,11 +399,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...)
+ HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPBodyNotContains asserts that a specified handler returns a
@@ -413,11 +412,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string,
// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
+ HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -426,11 +425,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...)
+ HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPError asserts that a specified handler returns an error status code.
diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
new file mode 100644
index 000000000..61dc97bde
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/apps/v1"
diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
new file mode 100644
index 000000000..6ef25f50f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -0,0 +1,8238 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
+func (*ControllerRevision) ProtoMessage() {}
+func (*ControllerRevision) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{0}
+}
+func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRevision) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRevision.Merge(m, src)
+}
+func (m *ControllerRevision) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRevision) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
+
+func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
+func (*ControllerRevisionList) ProtoMessage() {}
+func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{1}
+}
+func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRevisionList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRevisionList.Merge(m, src)
+}
+func (m *ControllerRevisionList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRevisionList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
+
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
+func (*DaemonSet) ProtoMessage() {}
+func (*DaemonSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{2}
+}
+func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSet.Merge(m, src)
+}
+func (m *DaemonSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
+
+func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
+func (*DaemonSetCondition) ProtoMessage() {}
+func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{3}
+}
+func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetCondition.Merge(m, src)
+}
+func (m *DaemonSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
+
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
+func (*DaemonSetList) ProtoMessage() {}
+func (*DaemonSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{4}
+}
+func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetList.Merge(m, src)
+}
+func (m *DaemonSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
+
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
+func (*DaemonSetSpec) ProtoMessage() {}
+func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{5}
+}
+func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetSpec.Merge(m, src)
+}
+func (m *DaemonSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
+
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
+func (*DaemonSetStatus) ProtoMessage() {}
+func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{6}
+}
+func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetStatus.Merge(m, src)
+}
+func (m *DaemonSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
+
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{7}
+}
+func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src)
+}
+func (m *DaemonSetUpdateStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
+
+func (m *Deployment) Reset() { *m = Deployment{} }
+func (*Deployment) ProtoMessage() {}
+func (*Deployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{8}
+}
+func (m *Deployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Deployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Deployment.Merge(m, src)
+}
+func (m *Deployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *Deployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_Deployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Deployment proto.InternalMessageInfo
+
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
+func (*DeploymentCondition) ProtoMessage() {}
+func (*DeploymentCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{9}
+}
+func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentCondition.Merge(m, src)
+}
+func (m *DeploymentCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
+
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
+func (*DeploymentList) ProtoMessage() {}
+func (*DeploymentList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{10}
+}
+func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentList.Merge(m, src)
+}
+func (m *DeploymentList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
+
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
+func (*DeploymentSpec) ProtoMessage() {}
+func (*DeploymentSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{11}
+}
+func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentSpec.Merge(m, src)
+}
+func (m *DeploymentSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
+
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
+func (*DeploymentStatus) ProtoMessage() {}
+func (*DeploymentStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{12}
+}
+func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentStatus.Merge(m, src)
+}
+func (m *DeploymentStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
+
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
+func (*DeploymentStrategy) ProtoMessage() {}
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{13}
+}
+func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentStrategy.Merge(m, src)
+}
+func (m *DeploymentStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
+
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
+func (*ReplicaSet) ProtoMessage() {}
+func (*ReplicaSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{14}
+}
+func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSet.Merge(m, src)
+}
+func (m *ReplicaSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
+
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
+func (*ReplicaSetCondition) ProtoMessage() {}
+func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{15}
+}
+func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetCondition.Merge(m, src)
+}
+func (m *ReplicaSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
+
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
+func (*ReplicaSetList) ProtoMessage() {}
+func (*ReplicaSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{16}
+}
+func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetList.Merge(m, src)
+}
+func (m *ReplicaSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
+
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
+func (*ReplicaSetSpec) ProtoMessage() {}
+func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{17}
+}
+func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetSpec.Merge(m, src)
+}
+func (m *ReplicaSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
+
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
+func (*ReplicaSetStatus) ProtoMessage() {}
+func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{18}
+}
+func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetStatus.Merge(m, src)
+}
+func (m *ReplicaSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
+
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{19}
+}
+func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src)
+}
+func (m *RollingUpdateDaemonSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
+
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
+func (*RollingUpdateDeployment) ProtoMessage() {}
+func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{20}
+}
+func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
+}
+func (m *RollingUpdateDeployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{21}
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src)
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
+
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
+func (*StatefulSet) ProtoMessage() {}
+func (*StatefulSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{22}
+}
+func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSet.Merge(m, src)
+}
+func (m *StatefulSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
+
+func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
+func (*StatefulSetCondition) ProtoMessage() {}
+func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{23}
+}
+func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetCondition.Merge(m, src)
+}
+func (m *StatefulSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
+
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
+func (*StatefulSetList) ProtoMessage() {}
+func (*StatefulSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{24}
+}
+func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetList.Merge(m, src)
+}
+func (m *StatefulSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
+
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
+func (*StatefulSetSpec) ProtoMessage() {}
+func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{25}
+}
+func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetSpec.Merge(m, src)
+}
+func (m *StatefulSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
+
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
+func (*StatefulSetStatus) ProtoMessage() {}
+func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{26}
+}
+func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetStatus.Merge(m, src)
+}
+func (m *StatefulSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
+
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
+func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{27}
+}
+func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src)
+}
+func (m *StatefulSetUpdateStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision")
+ proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList")
+ proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet")
+ proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition")
+ proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList")
+ proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec")
+ proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus")
+ proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy")
+ proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment")
+ proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition")
+ proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList")
+ proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec")
+ proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus")
+ proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy")
+ proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet")
+ proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition")
+ proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList")
+ proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec")
+ proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus")
+ proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet")
+ proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment")
+ proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy")
+ proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet")
+ proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition")
+ proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList")
+ proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec")
+ proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus")
+ proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b)
+}
+
+var fileDescriptor_e1014cab6f31e43b = []byte{
+ // 2031 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47,
+ 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19,
+ 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78,
+ 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea,
+ 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65,
+ 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea,
+ 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55,
+ 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f,
+ 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d,
+ 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36,
+ 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a,
+ 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88,
+ 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7,
+ 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35,
+ 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02,
+ 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37,
+ 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d,
+ 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c,
+ 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7,
+ 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11,
+ 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e,
+ 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e,
+ 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19,
+ 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16,
+ 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8,
+ 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa,
+ 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd,
+ 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc,
+ 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1,
+ 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1,
+ 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3,
+ 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6,
+ 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d,
+ 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7,
+ 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1,
+ 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92,
+ 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39,
+ 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2,
+ 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57,
+ 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34,
+ 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b,
+ 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9,
+ 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a,
+ 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95,
+ 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68,
+ 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f,
+ 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13,
+ 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda,
+ 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f,
+ 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6,
+ 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60,
+ 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf,
+ 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe,
+ 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39,
+ 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e,
+ 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22,
+ 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b,
+ 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5,
+ 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5,
+ 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9,
+ 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c,
+ 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78,
+ 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06,
+ 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0,
+ 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa,
+ 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6,
+ 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93,
+ 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99,
+ 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96,
+ 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7,
+ 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef,
+ 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed,
+ 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2,
+ 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e,
+ 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97,
+ 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93,
+ 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f,
+ 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d,
+ 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80,
+ 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40,
+ 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e,
+ 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25,
+ 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7,
+ 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9,
+ 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92,
+ 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b,
+ 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f,
+ 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f,
+ 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73,
+ 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2,
+ 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31,
+ 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89,
+ 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae,
+ 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26,
+ 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e,
+ 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab,
+ 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2,
+ 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5,
+ 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb,
+ 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a,
+ 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49,
+ 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20,
+ 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54,
+ 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd,
+ 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39,
+ 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60,
+ 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5,
+ 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91,
+ 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39,
+ 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8,
+ 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24,
+ 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a,
+ 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6,
+ 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b,
+ 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb,
+ 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed,
+ 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf,
+ 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74,
+ 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11,
+ 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c,
+ 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36,
+ 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce,
+ 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e,
+ 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36,
+ 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d,
+ 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0,
+ 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00,
+}
+
+func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x30
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x48
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable))
+ i--
+ dAtA[i] = 0x40
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable))
+ i--
+ dAtA[i] = 0x38
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled))
+ i--
+ dAtA[i] = 0x30
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Deployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Deployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProgressDeadlineSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds))
+ i--
+ dAtA[i] = 0x48
+ }
+ i--
+ if m.Paused {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x30
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x28
+ {
+ size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x40
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x38
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxSurge != nil {
+ {
+ size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Partition != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x40
+ }
+ {
+ size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.PodManagementPolicy)
+ copy(dAtA[i:], m.PodManagementPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.ServiceName)
+ copy(dAtA[i:], m.ServiceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.VolumeClaimTemplates) > 0 {
+ for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x48
+ }
+ i -= len(m.UpdateRevision)
+ copy(dAtA[i:], m.UpdateRevision)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.CurrentRevision)
+ copy(dAtA[i:], m.CurrentRevision)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision)))
+ i--
+ dAtA[i] = 0x32
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ControllerRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Revision))
+ return n
+}
+
+func (m *ControllerRevisionList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *DaemonSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberMisscheduled))
+ n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberReady))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberAvailable))
+ n += 1 + sovGenerated(uint64(m.NumberUnavailable))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetUpdateStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Deployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ n += 2
+ if m.ProgressDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds))
+ }
+ return n
+}
+
+func (m *DeploymentStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ReplicaSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicaSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ return n
+}
+
+func (m *ReplicaSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RollingUpdateDaemonSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateDeployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Partition != nil {
+ n += 1 + sovGenerated(uint64(*m.Partition))
+ }
+ return n
+}
+
+func (m *StatefulSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatefulSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, e := range m.VolumeClaimTemplates {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ServiceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodManagementPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *StatefulSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.CurrentReplicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ l = len(m.CurrentRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UpdateRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatefulSetUpdateStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ControllerRevision) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerRevision{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRevisionList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerRevision{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerRevisionList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]DaemonSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DaemonSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetSpec{`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]DaemonSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DaemonSetStatus{`,
+ `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`,
+ `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`,
+ `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`,
+ `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`,
+ `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`,
+ `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Deployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Deployment{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Deployment{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeploymentList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`,
+ `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]DeploymentCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DeploymentStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ReplicaSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ReplicaSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]ReplicaSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ReplicaSetStatus{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDaemonSet{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDeployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDeployment{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateStatefulSetStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`,
+ `Partition:` + valueToStringGenerated(this.Partition) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]StatefulSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&StatefulSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{"
+ for _, f := range this.VolumeClaimTemplates {
+ repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForVolumeClaimTemplates += "}"
+ s := strings.Join([]string{`&StatefulSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`,
+ `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
+ `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]StatefulSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&StatefulSetStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`,
+ `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ControllerRevision) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerRevision{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DaemonSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType)
+ }
+ m.CurrentNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType)
+ }
+ m.NumberMisscheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberMisscheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType)
+ }
+ m.DesiredNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DesiredNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType)
+ }
+ m.NumberReady = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberReady |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType)
+ }
+ m.UpdatedNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType)
+ }
+ m.NumberAvailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberAvailable |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType)
+ }
+ m.NumberUnavailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberUnavailable |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DaemonSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDaemonSet{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Deployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Deployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Deployment{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ProgressDeadlineSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType)
+ }
+ m.UnavailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnavailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DeploymentCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDeployment{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ReplicaSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+ }
+ m.FullyLabeledReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.FullyLabeledReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, ReplicaSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Partition = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, StatefulSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{})
+ if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType)
+ }
+ m.CurrentReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CurrentRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UpdateRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, StatefulSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateStatefulSetStrategy{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
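
Editorial note (not part of the patch): every generated Unmarshal method above decodes protobuf values with the same base-128 varint loop — each byte contributes its low 7 bits (b & 0x7F) shifted by a growing multiple of 7, and a byte below 0x80 terminates the value, with the 64-bit shift check guarding against ErrIntOverflowGenerated. A minimal standalone sketch of that loop, written here for illustration only and using hypothetical names rather than anything from the vendored file, could look like this:

	package main

	import (
		"errors"
		"fmt"
	)

	// decodeVarint reads one unsigned base-128 varint from data, mirroring the
	// decoding loop that the generated Unmarshal methods in the patch repeat
	// for every field and length prefix.
	func decodeVarint(data []byte) (value uint64, n int, err error) {
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, 0, errors.New("varint overflows 64 bits")
			}
			if n >= len(data) {
				return 0, 0, errors.New("unexpected end of input")
			}
			b := data[n]
			n++
			value |= uint64(b&0x7F) << shift
			if b < 0x80 { // high bit clear: this was the final byte
				return value, n, nil
			}
		}
	}

	func main() {
		// 0xAC 0x02 encodes 300: low bits 0x2C (44) plus 0x02 shifted by 7 (256).
		v, n, err := decodeVarint([]byte{0xAC, 0x02})
		fmt.Println(v, n, err) // 300 2 <nil>
	}

The generated code then splits such a varint into a field number (wire >> 3) and a wire type (wire & 0x7), and for wire type 2 reuses the same loop to read the length prefix before slicing dAtA for the nested Unmarshal calls seen throughout this file.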
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
new file mode 100644
index 000000000..6c5527974
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -0,0 +1,701 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+message ControllerRevision {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Data is the serialized representation of the state.
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+
+ // Revision indicates the revision of the state represented by Data.
+ optional int64 revision = 3;
+}
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+message ControllerRevisionList {
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerRevisions
+ repeated ControllerRevision items = 2;
+}
+
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetSpec spec = 2;
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+message DaemonSetCondition {
+ // Type of DaemonSet condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // A list of daemon sets.
+ repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+  // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 currentNumberScheduled = 1;
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 numberMisscheduled = 2;
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 desiredNumberScheduled = 3;
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ optional int32 numberReady = 4;
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ optional int64 observedGeneration = 5;
+
+ // The total number of nodes that are running updated daemon pod
+ // +optional
+ optional int32 updatedNumberScheduled = 6;
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberAvailable = 7;
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberUnavailable = 8;
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ optional int32 collisionCount = 9;
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DaemonSetCondition conditions = 10;
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+message DaemonSetUpdateStrategy {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ optional DeploymentSpec spec = 2;
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+ // Type of deployment condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+ // The reason for the condition's last transition.
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+ // Standard list metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Deployments.
+ repeated Deployment items = 2;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ // +optional
+ optional int32 replicas = 1;
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template describes the pods that will be created.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ // +patchStrategy=retainKeys
+ optional DeploymentStrategy strategy = 4;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 5;
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+
+ // Indicates that the deployment is paused.
+ // +optional
+ optional bool paused = 7;
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+ // The generation observed by the deployment controller.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ optional int32 replicas = 2;
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ optional int32 updatedReplicas = 3;
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ optional int32 readyReplicas = 7;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ optional int32 availableReplicas = 4;
+
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ // +optional
+ optional int32 unavailableReplicas = 5;
+
+ // Represents the latest available observations of a deployment's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DeploymentCondition conditions = 6;
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+message ReplicaSet {
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetSpec spec = 2;
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+ // Type of replica set condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ optional int32 replicas = 1;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ReplicaSetCondition conditions = 6;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ optional int32 partition = 1;
+}
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ optional StatefulSetSpec spec = 2;
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ optional StatefulSetStatus status = 3;
+}
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+message StatefulSetCondition {
+ // Type of statefulset condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated StatefulSet items = 2;
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ optional int32 replicas = 1;
+
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ optional string serviceName = 5;
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ optional string podManagementPolicy = 6;
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ optional int32 revisionHistoryLimit = 8;
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ optional int32 replicas = 2;
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ optional int32 readyReplicas = 3;
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ optional int32 currentReplicas = 4;
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ optional int32 updatedReplicas = 5;
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ optional string currentRevision = 6;
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ optional string updateRevision = 7;
+
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ // +optional
+ optional int32 collisionCount = 9;
+
+ // Represents the latest available observations of a statefulset's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated StatefulSetCondition conditions = 10;
+}
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ // +optional
+ optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
+
diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go
new file mode 100644
index 000000000..027101046
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/register.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Deployment{},
+ &DeploymentList{},
+ &StatefulSet{},
+ &StatefulSetList{},
+ &DaemonSet{},
+ &DaemonSetList{},
+ &ReplicaSet{},
+ &ReplicaSetList{},
+ &ControllerRevision{},
+ &ControllerRevisionList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
new file mode 100644
index 000000000..e003a0c4f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -0,0 +1,826 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+ ControllerRevisionHashLabelKey = "controller-revision-hash"
+ StatefulSetRevisionLabel = ControllerRevisionHashLabelKey
+ DeprecatedRollbackTo = "deprecated.deployment.rollback.to"
+ DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
+ StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"
+)
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+ // OrderedReadyPodManagement will create pods in strictly increasing order on
+ // scale up and strictly decreasing order on scale down, progressing only when
+ // the previous pod is ready or terminated. At most one pod will be changed
+ // at any time.
+ OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+ // ParallelPodManagement will create and delete pods as soon as the stateful set
+ // replica count is changed, and will not wait for pods to be ready or complete
+ // termination.
+ ParallelPodManagement PodManagementPolicyType = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ // +optional
+ Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ // +optional
+ RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+ // RollingUpdateStatefulSetStrategyType indicates that update will be
+ // applied to all Pods in the StatefulSet with respect to the StatefulSet
+ // ordering constraints. When a scale operation is performed with this
+ // strategy, new Pods will be created from the specification version indicated
+ // by the StatefulSet's updateRevision.
+ RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"
+ // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+ // tracking and ordered rolling restarts are disabled. Pods are recreated
+ // from the StatefulSetSpec when they are manually deleted. When a scale
+	// operation is performed with this strategy, new Pods will be created from
+	// the specification version indicated by the StatefulSet's currentRevision.
+ OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
+
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+ // Represents the latest available observations of a statefulset's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type StatefulSetConditionType string
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+ // Type of statefulset condition.
+ Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template describes the pods that will be created.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ // +patchStrategy=retainKeys
+ Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+ // Indicates that the deployment is paused.
+ // +optional
+ Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets
+	// from selecting new pods (and old pods from being selected by the new ReplicaSet).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+	// Replace the old ReplicaSets by new ones using rolling update, i.e. gradually scale down the old ReplicaSets and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ // +optional
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+ // Represents the latest available observations of a deployment's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+	// Available means the deployment is available, i.e. at least the minimum available
+	// replicas required are up and running for at least minReadySeconds.
+ DeploymentAvailable DeploymentConditionType = "Available"
+ // Progressing means the deployment is progressing. Progress for a deployment is
+ // considered when a new replica set is created or adopted, and when new pods scale
+ // up or old pods scale down. Progress is not estimated for paused deployments or
+ // when progressDeadlineSeconds is not specified.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // ReplicaFailure is added in a deployment when one of its pods fails to be created
+ // or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Deployments.
+ Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ // +optional
+ Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+	// Replace the old daemons by new ones using rolling update, i.e. replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+	// Replace the old daemons only when they are killed
+ OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+	// be ready without any of its containers crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+	// The total number of nodes that are running an updated daemon pod.
+ // +optional
+ UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+ // Type of DaemonSet condition.
+ Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+ // DefaultDaemonSetUniqueLabelKey is the default label key that is added
+ // to existing DaemonSet pods to distinguish between old and new
+ // DaemonSet pods during DaemonSet template updates.
+ DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // A list of daemon sets.
+ Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+type ReplicaSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+	// without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+ // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+ // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
+ // due to kubelet being down or finalizers are failing.
+ ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+ // Type of replica set condition.
+ Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+type ControllerRevision struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Data is the serialized representation of the state.
+ Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+
+ // Revision indicates the revision of the state represented by Data.
+ Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ControllerRevisions
+ Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
new file mode 100644
index 000000000..3f0299d03
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -0,0 +1,365 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ControllerRevision = map[string]string{
+ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "data": "Data is the serialized representation of the state.",
+ "revision": "Revision indicates the revision of the state represented by Data.",
+}
+
+func (ControllerRevision) SwaggerDoc() map[string]string {
+ return map_ControllerRevision
+}
+
+var map_ControllerRevisionList = map[string]string{
+ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
+ "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of ControllerRevisions",
+}
+
+func (ControllerRevisionList) SwaggerDoc() map[string]string {
+ return map_ControllerRevisionList
+}
+
+var map_DaemonSet = map[string]string{
+ "": "DaemonSet represents the configuration of a daemon set.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+ return map_DaemonSet
+}
+
+var map_DaemonSetCondition = map[string]string{
+ "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+ "type": "Type of DaemonSet condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DaemonSetCondition) SwaggerDoc() map[string]string {
+ return map_DaemonSetCondition
+}
+
+var map_DaemonSetList = map[string]string{
+ "": "DaemonSetList is a collection of daemon sets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+ return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+ "": "DaemonSetSpec is the specification of a daemon set.",
+ "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.",
+	"minReadySeconds":      "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+	"revisionHistoryLimit": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+ return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+ "": "DaemonSetStatus represents the current status of a daemon set.",
+ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+ "observedGeneration": "The most recent generation observed by the daemon set controller.",
+	"updatedNumberScheduled": "The total number of nodes that are running an updated daemon pod.",
+ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "conditions": "Represents the latest available observations of a DaemonSet's current state.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+ return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+ "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
+ "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+ "": "Deployment enables declarative updates for Pods and ReplicaSets.",
+ "metadata": "Standard object metadata.",
+ "spec": "Specification of the desired behavior of the Deployment.",
+ "status": "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+ return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+ "": "DeploymentCondition describes the state of a deployment at a certain point.",
+ "type": "Type of deployment condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+ return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+ "": "DeploymentList is a list of Deployments.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+ return map_DeploymentList
+}
+
+var map_DeploymentSpec = map[string]string{
+ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+ "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+ "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.",
+ "template": "Template describes the pods that will be created.",
+ "strategy": "The deployment strategy to use to replace existing pods with new ones.",
+	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+ "paused": "Indicates that the deployment is paused.",
+ "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+ "": "DeploymentStatus is the most recently observed status of the Deployment.",
+ "observedGeneration": "The generation observed by the deployment controller.",
+ "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of ready pods targeted by this deployment.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "conditions": "Represents the latest available observations of a deployment's current state.",
+ "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+ return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "DeploymentStrategy describes how to replace existing pods with new ones.",
+ "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_ReplicaSet = map[string]string{
+ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+ "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+ return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+ "": "ReplicaSetCondition describes the state of a replica set at a certain point.",
+ "type": "Type of replica set condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+ return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+ "": "ReplicaSetList is a collection of ReplicaSets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+ return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+ "": "ReplicaSetSpec is the specification of a ReplicaSet.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+ return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+ "": "ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of ready replicas for this replica set.",
+ "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+ "conditions": "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+ return map_ReplicaSetStatus
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+ "": "Spec to control the desired behavior of daemon set rolling update.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+ "": "Spec to control the desired behavior of rolling update.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+ "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+ "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+ return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_StatefulSet = map[string]string{
+ "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "spec": "Spec defines the desired identities of pods in this set.",
+ "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+ return map_StatefulSet
+}
+
+var map_StatefulSetCondition = map[string]string{
+ "": "StatefulSetCondition describes the state of a statefulset at a certain point.",
+ "type": "Type of statefulset condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (StatefulSetCondition) SwaggerDoc() map[string]string {
+ return map_StatefulSetCondition
+}
+
+var map_StatefulSetList = map[string]string{
+ "": "StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+ return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+ "": "A StatefulSetSpec is the specification of a StatefulSet.",
+ "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+ "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+ "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+ "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+ return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+ "": "StatefulSetStatus represents the current state of a StatefulSet.",
+ "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+ "replicas": "replicas is the number of Pods created by the StatefulSet controller.",
+ "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+ "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+ "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+ "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "conditions": "Represents the latest available observations of a statefulset's current state.",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+ return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+ "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+ "type": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
+ "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..7b7ff385c
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
@@ -0,0 +1,772 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRevision)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerRevision, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
+func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRevisionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DaemonSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+ *out = *in
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DaemonSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDaemonSet)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+ if in == nil {
+ return nil
+ }
+ out := new(Deployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Deployment, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.Strategy.DeepCopyInto(&out.Strategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ProgressDeadlineSeconds != nil {
+ in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DeploymentCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ReplicaSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ReplicaSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+ *out = *in
+ if in.Partition != nil {
+ in, out := &in.Partition, &out.Partition
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateStatefulSetStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StatefulSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ if in.VolumeClaimTemplates != nil {
+ in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]corev1.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+ *out = *in
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]StatefulSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateStatefulSetStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e79f8587b..a44d0e88d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -84,7 +84,7 @@ github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.12.0
+# github.com/containers/common v0.13.0
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
@@ -322,7 +322,7 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07
github.com/ishidawataru/sctp
-# github.com/json-iterator/go v1.1.9
+# github.com/json-iterator/go v1.1.10
github.com/json-iterator/go
# github.com/klauspost/compress v1.10.7
github.com/klauspost/compress/flate
@@ -491,7 +491,7 @@ github.com/sirupsen/logrus/hooks/syslog
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
-# github.com/stretchr/testify v1.6.0
+# github.com/stretchr/testify v1.6.1
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
@@ -689,6 +689,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
gopkg.in/yaml.v3
# k8s.io/api v0.18.3
+k8s.io/api/apps/v1
k8s.io/api/core/v1
# k8s.io/apimachinery v0.18.3
k8s.io/apimachinery/pkg/api/errors