-rw-r--r--  cmd/podman/common/create.go | 5
-rw-r--r--  cmd/podman/common/create_opts.go | 1
-rw-r--r--  cmd/podman/common/specgen.go | 11
-rw-r--r--  cmd/podman/common/util.go | 57
-rw-r--r--  cmd/podman/parse/common.go | 55
-rw-r--r--  cmd/podman/play/kube.go | 34
-rw-r--r--  cmd/podman/pods/create.go | 4
-rw-r--r--  cmd/podman/pods/rm.go | 25
-rw-r--r--  cmd/podman/pods/start.go | 22
-rw-r--r--  cmd/podman/pods/stop.go | 35
-rw-r--r--  completions/bash/podman | 7
-rw-r--r--  contrib/spec/podman.spec.in | 3
-rw-r--r--  docs/source/markdown/podman-create.1.md | 4
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md | 5
-rw-r--r--  docs/source/markdown/podman-pod-create.1.md | 4
-rw-r--r--  docs/source/markdown/podman-pod-rm.1.md | 6
-rw-r--r--  docs/source/markdown/podman-pod-start.1.md | 5
-rw-r--r--  docs/source/markdown/podman-pod-stop.1.md | 11
-rw-r--r--  docs/source/markdown/podman-run.1.md | 4
-rw-r--r--  libpod/container_internal_linux.go | 5
-rw-r--r--  libpod/container_log.go | 4
-rw-r--r--  libpod/define/pod_inspect.go | 3
-rw-r--r--  libpod/oci_util.go | 36
-rw-r--r--  libpod/options.go | 24
-rw-r--r--  libpod/pod.go | 25
-rw-r--r--  libpod/pod_api.go | 1
-rw-r--r--  libpod/runtime_pod_infra_linux.go | 3
-rw-r--r--  pkg/api/handlers/compat/containers.go | 7
-rw-r--r--  pkg/domain/entities/play.go | 14
-rw-r--r--  pkg/domain/entities/pods.go | 22
-rw-r--r--  pkg/domain/infra/abi/generate.go | 161
-rw-r--r--  pkg/domain/infra/abi/play.go | 102
-rw-r--r--  pkg/domain/infra/abi/pods.go | 4
-rw-r--r--  pkg/domain/infra/abi/system.go | 6
-rw-r--r--  pkg/specgen/generate/pod_create.go | 4
-rw-r--r--  pkg/specgen/podspecgen.go | 3
-rw-r--r--  pkg/systemd/generate/common.go | 50
-rw-r--r--  pkg/systemd/generate/common_test.go | 25
-rw-r--r--  pkg/systemd/generate/containers.go | 289
-rw-r--r--  pkg/systemd/generate/containers_test.go | 366
-rw-r--r--  pkg/systemd/generate/pods.go | 341
-rw-r--r--  pkg/systemd/generate/pods_test.go | 100
-rw-r--r--  pkg/systemd/generate/systemdgen.go | 237
-rw-r--r--  pkg/systemd/generate/systemdgen_test.go | 347
-rw-r--r--  test/e2e/create_test.go | 37
-rw-r--r--  test/e2e/generate_kube_test.go | 6
-rw-r--r--  test/e2e/generate_systemd_test.go | 50
-rw-r--r--  test/e2e/play_kube_test.go | 300
-rw-r--r--  test/e2e/pod_inspect_test.go | 22
-rw-r--r--  test/e2e/pod_rm_test.go | 69
-rw-r--r--  test/e2e/pod_start_test.go | 94
-rw-r--r--  test/e2e/pod_stop_test.go | 69
-rw-r--r--  test/e2e/run_networking_test.go | 26
-rw-r--r--  vendor/k8s.io/api/apps/v1/doc.go | 21
-rw-r--r--  vendor/k8s.io/api/apps/v1/generated.pb.go | 8238
-rw-r--r--  vendor/k8s.io/api/apps/v1/generated.proto | 701
-rw-r--r--  vendor/k8s.io/api/apps/v1/register.go | 60
-rw-r--r--  vendor/k8s.io/api/apps/v1/types.go | 826
-rw-r--r--  vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go | 365
-rw-r--r--  vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go | 772
-rw-r--r--  vendor/modules.txt | 1
61 files changed, 13277 insertions, 857 deletions
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index 86cd51643..e79c5c20b 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -338,6 +338,11 @@ func GetCreateFlags(cf *ContainerCLIOpts) *pflag.FlagSet {
"pod", "",
"Run container in an existing pod",
)
+ createFlags.StringVar(
+ &cf.PodIDFile,
+ "pod-id-file", "",
+ "Read the pod ID from the file",
+ )
createFlags.BoolVar(
&cf.Privileged,
"privileged", false,
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 4cba5daf7..98dc6744c 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -68,6 +68,7 @@ type ContainerCLIOpts struct {
PID string
PIDsLimit int64
Pod string
+ PodIDFile string
Privileged bool
PublishAll bool
Pull string
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 2286e67de..fee9d8c7b 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -254,6 +254,17 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
s.PublishExposedPorts = c.PublishAll
s.Pod = c.Pod
+ if len(c.PodIDFile) > 0 {
+ if len(s.Pod) > 0 {
+ return errors.New("Cannot specify both --pod and --pod-id-file")
+ }
+ podID, err := ReadPodIDFile(c.PodIDFile)
+ if err != nil {
+ return err
+ }
+ s.Pod = podID
+ }
+
expose, err := createExpose(c.Expose)
if err != nil {
return err
diff --git a/cmd/podman/common/util.go b/cmd/podman/common/util.go
index a3626b4e4..ce323a4ba 100644
--- a/cmd/podman/common/util.go
+++ b/cmd/podman/common/util.go
@@ -1,6 +1,7 @@
package common
import (
+ "io/ioutil"
"net"
"strconv"
"strings"
@@ -10,6 +11,30 @@ import (
"github.com/sirupsen/logrus"
)
+// ReadPodIDFile reads the specified file and returns its content (i.e., first
+// line).
+func ReadPodIDFile(path string) (string, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", errors.Wrap(err, "error reading pod ID file")
+ }
+ return strings.Split(string(content), "\n")[0], nil
+}
+
+// ReadPodIDFiles reads the specified files and returns their content (i.e.,
+// first line).
+func ReadPodIDFiles(files []string) ([]string, error) {
+ ids := []string{}
+ for _, file := range files {
+ id, err := ReadPodIDFile(file)
+ if err != nil {
+ return nil, err
+ }
+ ids = append(ids, id)
+ }
+ return ids, nil
+}
+
// createExpose parses user-provided exposed port definitions and converts them
// into SpecGen format.
// TODO: The SpecGen format should really handle ranges more sanely - we could
@@ -71,14 +96,44 @@ func createPortBindings(ports []string) ([]specgen.PortMapping, error) {
return nil, errors.Errorf("invalid port format - protocol can only be specified once")
}
- splitPort := strings.Split(splitProto[0], ":")
+ remainder := splitProto[0]
+ haveV6 := false
+
+ // Check for an IPv6 address in brackets
+ splitV6 := strings.Split(remainder, "]")
+ switch len(splitV6) {
+ case 1:
+ // Do nothing, proceed as before
+ case 2:
+ // We potentially have an IPv6 address
+ haveV6 = true
+ if !strings.HasPrefix(splitV6[0], "[") {
+ return nil, errors.Errorf("invalid port format - IPv6 addresses must be enclosed by []")
+ }
+ if !strings.HasPrefix(splitV6[1], ":") {
+ return nil, errors.Errorf("invalid port format - IPv6 address must be followed by a colon (':')")
+ }
+ ipNoPrefix := strings.TrimPrefix(splitV6[0], "[")
+ hostIP = &ipNoPrefix
+ remainder = strings.TrimPrefix(splitV6[1], ":")
+ default:
+ return nil, errors.Errorf("invalid port format - at most one IPv6 address can be specified in a --publish")
+ }
+
+ splitPort := strings.Split(remainder, ":")
switch len(splitPort) {
case 1:
+ if haveV6 {
+ return nil, errors.Errorf("invalid port format - must provide host and destination port if specifying an IP")
+ }
ctrPort = splitPort[0]
case 2:
hostPort = &(splitPort[0])
ctrPort = splitPort[1]
case 3:
+ if haveV6 {
+ return nil, errors.Errorf("invalid port format - when v6 address specified, must be [ipv6]:hostPort:ctrPort")
+ }
hostIP = &(splitPort[0])
hostPort = &(splitPort[1])
ctrPort = splitPort[2]
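
The bracket handling above accepts the existing `ctrPort`, `hostPort:ctrPort`, and `ip:hostPort:ctrPort` forms and adds `[ipv6]:hostPort:ctrPort`; a bare IPv6 address without both ports is rejected. The following is a minimal standalone sketch of that splitting logic (illustrative names, not the actual podman parser):

```
package main

import (
	"fmt"
	"strings"
)

// splitPublish mirrors, in simplified form, the bracket handling added above:
// an IPv6 host address must be wrapped in [] and followed by ':', and when an
// IP is given both host and container ports are required.
func splitPublish(spec string) (hostIP, hostPort, ctrPort string, err error) {
	remainder := spec
	if strings.HasPrefix(remainder, "[") {
		parts := strings.SplitN(remainder, "]", 2)
		if len(parts) != 2 || !strings.HasPrefix(parts[1], ":") {
			return "", "", "", fmt.Errorf("invalid IPv6 publish spec %q", spec)
		}
		hostIP = strings.TrimPrefix(parts[0], "[")
		remainder = strings.TrimPrefix(parts[1], ":")
		p := strings.Split(remainder, ":")
		if len(p) != 2 {
			return "", "", "", fmt.Errorf("expected [ipv6]:hostPort:ctrPort in %q", spec)
		}
		return hostIP, p[0], p[1], nil
	}
	p := strings.Split(remainder, ":")
	switch len(p) {
	case 1:
		return "", "", p[0], nil
	case 2:
		return "", p[0], p[1], nil
	case 3:
		return p[0], p[1], p[2], nil
	}
	return "", "", "", fmt.Errorf("invalid publish spec %q", spec)
}

func main() {
	for _, s := range []string{"8080:80", "127.0.0.1:8080:80", "[::1]:8080:80"} {
		ip, hp, cp, err := splitPublish(s)
		fmt.Println(s, "->", ip, hp, cp, err)
	}
}
```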
diff --git a/cmd/podman/parse/common.go b/cmd/podman/parse/common.go
index 13f425b6d..b3aa88da2 100644
--- a/cmd/podman/parse/common.go
+++ b/cmd/podman/parse/common.go
@@ -5,6 +5,10 @@ import (
"github.com/spf13/cobra"
)
+// TODO: the two functions here are almost identical. It may be worth looking
+// into generalizing the two a bit more and sharing code, but time is scarce and
+// we only live once.
+
// CheckAllLatestAndCIDFile checks that --all and --latest are used correctly.
// If cidfile is set, also check for the --cidfile flag.
func CheckAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool, cidfile bool) error {
@@ -55,3 +59,54 @@ func CheckAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool
}
return nil
}
+
+// CheckAllLatestAndPodIDFile checks that --all and --latest are used correctly.
+// If withIDFile is set, also check for the --pod-id-file flag.
+func CheckAllLatestAndPodIDFile(c *cobra.Command, args []string, ignoreArgLen bool, withIDFile bool) error {
+ argLen := len(args)
+ if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
+ if !withIDFile {
+ return errors.New("unable to lookup values for 'latest' or 'all'")
+ } else if c.Flags().Lookup("pod-id-file") == nil {
+ return errors.New("unable to lookup values for 'latest', 'all' or 'pod-id-file'")
+ }
+ }
+
+ specifiedAll, _ := c.Flags().GetBool("all")
+ specifiedLatest, _ := c.Flags().GetBool("latest")
+ specifiedPodIDFile := false
+ if pid, _ := c.Flags().GetStringArray("pod-id-file"); len(pid) > 0 {
+ specifiedPodIDFile = true
+ }
+
+ if specifiedPodIDFile && (specifiedAll || specifiedLatest) {
+ return errors.Errorf("--all, --latest and --pod-id-file cannot be used together")
+ } else if specifiedAll && specifiedLatest {
+ return errors.Errorf("--all and --latest cannot be used together")
+ }
+
+ if (argLen > 0) && specifiedAll {
+ return errors.Errorf("no arguments are needed with --all")
+ }
+
+ if ignoreArgLen {
+ return nil
+ }
+
+ if argLen > 0 {
+ if specifiedLatest {
+ return errors.Errorf("no arguments are needed with --latest")
+ } else if withIDFile && (specifiedLatest || specifiedPodIDFile) {
+ return errors.Errorf("no arguments are needed with --latest or --pod-id-file")
+ }
+ }
+
+ if specifiedPodIDFile {
+ return nil
+ }
+
+ if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedPodIDFile {
+ return errors.Errorf("you must provide at least one name or id")
+ }
+ return nil
+}
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 1fbf24d5e..c26ca9853 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -92,21 +92,29 @@ func kube(cmd *cobra.Command, args []string) error {
return err
}
- for _, l := range report.Logs {
- fmt.Fprintf(os.Stderr, l)
+ for _, pod := range report.Pods {
+ for _, l := range pod.Logs {
+ fmt.Fprintf(os.Stderr, l)
+ }
}
- fmt.Printf("Pod:\n%s\n", report.Pod)
- switch len(report.Containers) {
- case 0:
- return nil
- case 1:
- fmt.Printf("Container:\n")
- default:
- fmt.Printf("Containers:\n")
- }
- for _, ctr := range report.Containers {
- fmt.Println(ctr)
+ for _, pod := range report.Pods {
+ fmt.Printf("Pod:\n")
+ fmt.Println(pod.ID)
+
+ switch len(pod.Containers) {
+ case 0:
+ continue
+ case 1:
+ fmt.Printf("Container:\n")
+ default:
+ fmt.Printf("Containers:\n")
+ }
+ for _, ctr := range pod.Containers {
+ fmt.Println(ctr)
+ }
+ // Empty line for space for next block
+ fmt.Println()
}
return nil
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 5ed5fa57c..51b7a7d52 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -53,6 +53,7 @@ func init() {
flags.AddFlagSet(common.GetNetFlags())
flags.StringVar(&createOptions.CGroupParent, "cgroup-parent", "", "Set parent cgroup for the pod")
flags.BoolVar(&createOptions.Infra, "infra", true, "Create an infra container associated with the pod to share namespaces with")
+ flags.StringVar(&createOptions.InfraConmonPidFile, "infra-conmon-pidfile", "", "Path to the file that will receive the PID of the infra container's conmon")
flags.StringVar(&createOptions.InfraImage, "infra-image", containerConfig.Engine.InfraImage, "The image of the infra container to associate with the pod")
flags.StringVar(&createOptions.InfraCommand, "infra-command", containerConfig.Engine.InfraCommand, "The command to run on the infra container when the pod is started")
flags.StringSliceVar(&labelFile, "label-file", []string{}, "Read in a line delimited file of labels")
@@ -83,6 +84,9 @@ func create(cmd *cobra.Command, args []string) error {
if !createOptions.Infra {
logrus.Debugf("Not creating an infra container")
+ if cmd.Flag("infra-conmon-pidfile").Changed {
+ return errors.New("cannot set infra-conmon-pid without an infra container")
+ }
if cmd.Flag("infra-command").Changed {
return errors.New("cannot set infra-command without an infra container")
}
diff --git a/cmd/podman/pods/rm.go b/cmd/podman/pods/rm.go
index 4b9882f8a..8de0bce9e 100644
--- a/cmd/podman/pods/rm.go
+++ b/cmd/podman/pods/rm.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,7 +12,15 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podRmOptionsWrapper struct {
+ entities.PodRmOptions
+
+ PodIDFiles []string
+}
+
var (
+ rmOptions = podRmOptionsWrapper{}
podRmDescription = fmt.Sprintf(`podman rm will remove one or more stopped pods and their containers from the host.
The pod name or ID can be used. A pod with containers will not be removed without --force. If --force is specified, all containers will be stopped, then removed.`)
@@ -21,7 +30,7 @@ var (
Long: podRmDescription,
RunE: rm,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod rm mywebserverpod
podman pod rm -f 860a4b23
@@ -29,10 +38,6 @@ var (
}
)
-var (
- rmOptions = entities.PodRmOptions{}
-)
-
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
@@ -45,6 +50,7 @@ func init() {
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Force removal of a running pod by first stopping all containers, then removing all containers in the pod. The default is false")
flags.BoolVarP(&rmOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&rmOptions.Latest, "latest", "l", false, "Remove the latest pod podman is aware of")
+ flags.StringArrayVarP(&rmOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
_ = flags.MarkHidden("ignore")
@@ -55,7 +61,14 @@ func rm(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
- responses, err := registry.ContainerEngine().PodRm(context.Background(), args, rmOptions)
+
+ ids, err := common.ReadPodIDFiles(rmOptions.PodIDFiles)
+ if err != nil {
+ return err
+ }
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodRm(context.Background(), args, rmOptions.PodRmOptions)
if err != nil {
return err
}
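
The `pod rm`, `pod start`, and `pod stop` commands all follow the same pattern: read the first line of each `--pod-id-file` and append the IDs to the positional arguments before calling the engine. A small self-contained sketch of that flow (temporary file and names are illustrative only):

```
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// readPodIDFile mirrors common.ReadPodIDFile: the pod ID is the first line
// of the file.
func readPodIDFile(path string) (string, error) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return strings.Split(string(content), "\n")[0], nil
}

func main() {
	dir, err := ioutil.TempDir("", "podid")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	idFile := filepath.Join(dir, "pod.id")
	if err := ioutil.WriteFile(idFile, []byte("19456b4cd557\n"), 0600); err != nil {
		panic(err)
	}

	// IDs from --pod-id-file are appended to the regular name/ID arguments.
	args := []string{"mywebserverpod"}
	id, err := readPodIDFile(idFile)
	if err != nil {
		panic(err)
	}
	args = append(args, id)
	fmt.Println(args) // [mywebserverpod 19456b4cd557]
}
```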
diff --git a/cmd/podman/pods/start.go b/cmd/podman/pods/start.go
index d0150a3c2..97020b360 100644
--- a/cmd/podman/pods/start.go
+++ b/cmd/podman/pods/start.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,6 +12,13 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podStartOptionsWrapper struct {
+ entities.PodStartOptions
+
+ PodIDFiles []string
+}
+
var (
podStartDescription = `The pod name or ID can be used.
@@ -21,7 +29,7 @@ var (
Long: podStartDescription,
RunE: start,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod start podID
podman pod start --latest
@@ -30,7 +38,7 @@ var (
)
var (
- startOptions = entities.PodStartOptions{}
+ startOptions = podStartOptionsWrapper{}
)
func init() {
@@ -43,6 +51,7 @@ func init() {
flags := startCommand.Flags()
flags.BoolVarP(&startOptions.All, "all", "a", false, "Restart all running pods")
flags.BoolVarP(&startOptions.Latest, "latest", "l", false, "Restart the latest pod podman is aware of")
+ flags.StringArrayVarP(&startOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
}
@@ -52,7 +61,14 @@ func start(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
- responses, err := registry.ContainerEngine().PodStart(context.Background(), args, startOptions)
+
+ ids, err := common.ReadPodIDFiles(startOptions.PodIDFiles)
+ if err != nil {
+ return err
+ }
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodStart(context.Background(), args, startOptions.PodStartOptions)
if err != nil {
return err
}
diff --git a/cmd/podman/pods/stop.go b/cmd/podman/pods/stop.go
index daf05d640..628e8a536 100644
--- a/cmd/podman/pods/stop.go
+++ b/cmd/podman/pods/stop.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/libpod/cmd/podman/common"
"github.com/containers/libpod/cmd/podman/parse"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
@@ -11,7 +12,18 @@ import (
"github.com/spf13/cobra"
)
+// allows for splitting API and CLI-only options
+type podStopOptionsWrapper struct {
+ entities.PodStopOptions
+
+ PodIDFiles []string
+ TimeoutCLI uint
+}
+
var (
+ stopOptions = podStopOptionsWrapper{
+ PodStopOptions: entities.PodStopOptions{Timeout: -1},
+ }
podStopDescription = `The pod name or ID can be used.
This command will stop all running containers in each of the specified pods.`
@@ -22,7 +34,7 @@ var (
Long: podStopDescription,
RunE: stop,
Args: func(cmd *cobra.Command, args []string) error {
- return parse.CheckAllLatestAndCIDFile(cmd, args, false, false)
+ return parse.CheckAllLatestAndPodIDFile(cmd, args, false, true)
},
Example: `podman pod stop mywebserverpod
podman pod stop --latest
@@ -30,13 +42,6 @@ var (
}
)
-var (
- stopOptions = entities.PodStopOptions{
- Timeout: -1,
- }
- timeout uint
-)
-
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
@@ -47,7 +52,8 @@ func init() {
flags.BoolVarP(&stopOptions.All, "all", "a", false, "Stop all running pods")
flags.BoolVarP(&stopOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&stopOptions.Latest, "latest", "l", false, "Stop the latest pod podman is aware of")
- flags.UintVarP(&timeout, "time", "t", containerConfig.Engine.StopTimeout, "Seconds to wait for pod stop before killing the container")
+ flags.UintVarP(&stopOptions.TimeoutCLI, "time", "t", containerConfig.Engine.StopTimeout, "Seconds to wait for pod stop before killing the container")
+ flags.StringArrayVarP(&stopOptions.PodIDFiles, "pod-id-file", "", nil, "Read the pod ID from the file")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
_ = flags.MarkHidden("ignore")
@@ -60,9 +66,16 @@ func stop(cmd *cobra.Command, args []string) error {
errs utils.OutputErrors
)
if cmd.Flag("time").Changed {
- stopOptions.Timeout = int(timeout)
+ stopOptions.Timeout = int(stopOptions.TimeoutCLI)
+ }
+
+ ids, err := common.ReadPodIDFiles(stopOptions.PodIDFiles)
+ if err != nil {
+ return err
}
- responses, err := registry.ContainerEngine().PodStop(context.Background(), args, stopOptions)
+ args = append(args, ids...)
+
+ responses, err := registry.ContainerEngine().PodStop(context.Background(), args, stopOptions.PodStopOptions)
if err != nil {
return err
}
diff --git a/completions/bash/podman b/completions/bash/podman
index a58becaf0..6dbe645fe 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2102,6 +2102,7 @@ _podman_container_run() {
--pid
--pids-limit
--pod
+ --pod-id-file
--publish -p
--pull
--runtime
@@ -2206,7 +2207,7 @@ _podman_container_run() {
__podman_complete_capabilities
return
;;
- --cidfile|--env-file|--init-path|--label-file)
+ --cidfile|--env-file|--init-path|--label-file|--pod-id-file)
_filedir
return
;;
@@ -3097,6 +3098,7 @@ _podman_pod_create() {
--dns-opt
--dns-search
--infra-command
+ --infra-conmon-pidfile
--infra-image
--ip
--label-file
@@ -3223,6 +3225,7 @@ _podman_pod_restart() {
_podman_pod_rm() {
local options_with_args="
+ --pod-id-file
"
local boolean_options="
@@ -3250,6 +3253,7 @@ _podman_pod_rm() {
_podman_pod_start() {
local options_with_args="
+ --pod-id-file
"
local boolean_options="
@@ -3275,6 +3279,7 @@ _podman_pod_stop() {
local options_with_args="
-t
--time
+ --pod-id-file
"
local boolean_options="
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 8d3cba612..260de7b20 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -77,8 +77,9 @@ BuildRequires: systemd-devel
Requires: skopeo-containers
Requires: containernetworking-plugins >= 0.6.0-3
Requires: iptables
-%if 0%{?rhel} <= 7
+%if 0%{?rhel} < 8 || 0%{?centos} < 8
Requires: container-selinux
+Requires: runc
%else
%if 0%{?rhel} || 0%{?centos}
Requires: runc
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index a69ef04d1..dbc835920 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -593,6 +593,10 @@ Tune the container's pids limit. Set `0` to have unlimited pids for the containe
Run container in an existing pod. If you want Podman to make the pod for you, preference the pod name with `new:`.
To make a pod with more granular options, use the `podman pod create` command before creating a container.
+**--pod-id-file**=*path*
+
+Run container in an existing pod and read the pod's ID from the specified file. If a container is run within a pod, and the pod has an infra-container, the infra-container will be started before the container is.
+
**--privileged**=*true|false*
Give extended privileges to this container. The default is *false*.
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 72031b19b..2facd754c 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -26,10 +26,7 @@ Use the name of the container for the start, stop, and description in the unit f
**--new**
-Create a new container via podman-run instead of starting an existing one. This option relies on container configuration files, which may not map directly to podman CLI flags; please review the generated output carefully before placing in production.
-Since we use systemd `Type=forking` service, using this option will force the container run with the detached param `-d`.
-
-Note: Generating systemd unit files with `--new` flag is not yet supported for pods.
+Using this flag will yield unit files that do not expect containers and pods to exist. Instead, new containers and pods are created based on their configuration files. The unit files are created on a best-effort basis and may need further editing; please review the generated files carefully before using them in production.
**--time**, **-t**=*value*
diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index 489c9b32e..de6b600f0 100644
--- a/docs/source/markdown/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
@@ -47,6 +47,10 @@ Set a hostname to the pod
Create an infra container and associate it with the pod. An infra container is a lightweight container used to coordinate the shared kernel namespace of a pod. Default: true.
+**--infra-conmon-pidfile**=*file*
+
+Write the PID of the infra container's **conmon** process to a file. As **conmon** runs in a separate process from Podman, this is necessary when using systemd to manage Podman containers and pods.
+
**--infra-command**=*command*
The command that will be run to start the infra container. Default: "/pause".
diff --git a/docs/source/markdown/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index 14da2071f..95e7ab002 100644
--- a/docs/source/markdown/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
@@ -31,6 +31,10 @@ The latest option is not supported on the remote client.
Stop running containers and delete all stopped containers before removal of pod.
+**--pod-id-file**
+
+Read pod ID from the specified file and remove the pod. Can be specified multiple times.
+
## EXAMPLE
podman pod rm mywebserverpod
@@ -43,6 +47,8 @@ podman pod rm -f -a
podman pod rm -fa
+podman pod rm --pod-id-file /path/to/id/file
+
## SEE ALSO
podman-pod(1)
diff --git a/docs/source/markdown/podman-pod-start.1.md b/docs/source/markdown/podman-pod-start.1.md
index 29960d6aa..6c6cfa2cf 100644
--- a/docs/source/markdown/podman-pod-start.1.md
+++ b/docs/source/markdown/podman-pod-start.1.md
@@ -22,6 +22,10 @@ Instead of providing the pod name or ID, start the last created pod.
The latest option is not supported on the remote client.
+**--pod-id-file**
+
+Read pod ID from the specified file and start the pod. Can be specified multiple times.
+
## EXAMPLE
podman pod start mywebserverpod
@@ -32,6 +36,7 @@ podman pod start --latest
podman pod start --all
+podman pod start --pod-id-file /path/to/id/file
## SEE ALSO
podman-pod(1), podman-pod-stop(1), podman-start(1)
diff --git a/docs/source/markdown/podman-pod-stop.1.md b/docs/source/markdown/podman-pod-stop.1.md
index b5e7aef7d..7ce9ff941 100644
--- a/docs/source/markdown/podman-pod-stop.1.md
+++ b/docs/source/markdown/podman-pod-stop.1.md
@@ -31,6 +31,10 @@ The latest option is not supported on the remote client.
Timeout to wait before forcibly stopping the containers in the pod.
+**--pod-id-file**
+
+Read pod ID from the specified file and stop the pod. Can be specified multiple times.
+
## EXAMPLE
Stop a pod called *mywebserverpod*
@@ -62,6 +66,13 @@ $ podman pod stop --all
cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
```
+Stop two pods via --pod-id-file
+```
+$ podman pod stop --pod-id-file file1 --pod-id-file file2
+19456b4cd557eaf9629825113a552681a6013f8c8cad258e36ab825ef536e818
+cc8f0bea67b1a1a11aec1ecd38102a1be4b145577f21fc843c7c83b77fc28907
+```
+
Stop all pods with a timeout of 1 second.
```
$ podman pod stop -a -t 1
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 02db8b205..22f7cae09 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -605,6 +605,10 @@ Run container in an existing pod. If you want Podman to make the pod for you, pr
To make a pod with more granular options, use the **podman pod create** command before creating a container.
If a container is run with a pod, and the pod has an infra-container, the infra-container will be started before the container is.
+**--pod-id-file**=*path*
+
+Run container in an existing pod and read the pod's ID from the specified file. If a container is run within a pod, and the pod has an infra-container, the infra-container will be started before the container is.
+
**--privileged**=**true**|**false**
Give extended privileges to this container. The default is **false**.
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index e3ca3f6b2..9afe11b2b 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -79,7 +79,8 @@ func (c *Container) prepare() error {
go func() {
defer wg.Done()
// Set up network namespace if not already set up
- if c.config.CreateNetNS && c.state.NetNS == nil && !c.config.PostConfigureNetNS {
+ noNetNS := c.state.NetNS == nil
+ if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS {
netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
if createNetNSErr != nil {
return
@@ -94,7 +95,7 @@ func (c *Container) prepare() error {
}
// handle rootless network namespace setup
- if c.state.NetNS != nil && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS {
+ if noNetNS && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS {
createNetNSErr = c.runtime.setupRootlessNetNS(c)
}
}()
diff --git a/libpod/container_log.go b/libpod/container_log.go
index bfa303e84..c3a84d048 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -19,7 +19,7 @@ func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChan
return nil
}
-// ReadLog reads a containers log based on the input options and returns loglines over a channel
+// ReadLog reads a container's log based on the input options and returns log lines over a channel.
func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
// TODO Skip sending logs until journald logs can be read
// TODO make this not a magic string
@@ -61,7 +61,7 @@ func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *l
partial += nll.Msg
continue
} else if !nll.Partial() && len(partial) > 1 {
- nll.Msg = partial
+ nll.Msg = partial + nll.Msg
partial = ""
}
nll.CID = c.ID()
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 26fd2cab4..7f06e16fc 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -18,6 +18,9 @@ type InspectPodData struct {
Namespace string `json:"Namespace,omitempty"`
// Created is the time when the pod was created.
Created time.Time
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string `json:"CreateCommand,omitempty"`
// State represents the current state of the pod.
State string `json:"State"`
// Hostname is the hostname that the pod will set.
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 53567d2d0..8b40dad81 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -36,14 +36,30 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
var files []*os.File
notifySCTP := false
for _, i := range ports {
+ isV6 := net.ParseIP(i.HostIP).To4() == nil
+ if i.HostIP == "" {
+ isV6 = false
+ }
switch i.Protocol {
case "udp":
- addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ var (
+ addr *net.UDPAddr
+ err error
+ )
+ if isV6 {
+ addr, err = net.ResolveUDPAddr("udp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort))
+ } else {
+ addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ }
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve the UDP address")
}
- server, err := net.ListenUDP("udp", addr)
+ proto := "udp4"
+ if isV6 {
+ proto = "udp6"
+ }
+ server, err := net.ListenUDP(proto, addr)
if err != nil {
return nil, errors.Wrapf(err, "cannot listen on the UDP port")
}
@@ -54,12 +70,24 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
files = append(files, f)
case "tcp":
- addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ var (
+ addr *net.TCPAddr
+ err error
+ )
+ if isV6 {
+ addr, err = net.ResolveTCPAddr("tcp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort))
+ } else {
+ addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
+ }
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve the TCP address")
}
- server, err := net.ListenTCP("tcp4", addr)
+ proto := "tcp4"
+ if isV6 {
+ proto = "tcp6"
+ }
+ server, err := net.ListenTCP(proto, addr)
if err != nil {
return nil, errors.Wrapf(err, "cannot listen on the TCP port")
}
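
The port-binding change keys the `udp4`/`udp6` and `tcp4`/`tcp6` choice on whether the host IP parses as IPv4; an empty host IP keeps the previous IPv4 wildcard behaviour. A minimal sketch of just that decision, with illustrative values:

```
package main

import (
	"fmt"
	"net"
)

// isIPv6Host mirrors the check used above: an empty host IP is treated as
// IPv4, otherwise anything that does not reduce to a 4-byte address is IPv6.
func isIPv6Host(hostIP string) bool {
	if hostIP == "" {
		return false
	}
	return net.ParseIP(hostIP).To4() == nil
}

func main() {
	for _, ip := range []string{"", "127.0.0.1", "::1", "fd00::1"} {
		proto := "tcp4"
		if isIPv6Host(ip) {
			proto = "tcp6"
		}
		fmt.Printf("%-12q -> %s\n", ip, proto)
	}
}
```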
diff --git a/libpod/options.go b/libpod/options.go
index 8e0d3df86..5a0f60093 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1538,6 +1538,30 @@ func WithPodHostname(hostname string) PodCreateOption {
}
}
+// WithPodCreateCommand adds the full command plus arguments of the current
+// process to the pod config.
+func WithPodCreateCommand() PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+ pod.config.CreateCommand = os.Args
+ return nil
+ }
+}
+
+// WithInfraConmonPidFile sets the path to a custom conmon PID file for the
+// infra container.
+func WithInfraConmonPidFile(path string) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+ pod.config.InfraContainer.ConmonPidFile = path
+ return nil
+ }
+}
+
// WithPodLabels sets the labels of a pod.
func WithPodLabels(labels map[string]string) PodCreateOption {
return func(pod *Pod) error {
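
`WithPodCreateCommand` and `WithInfraConmonPidFile` follow libpod's functional-option pattern: each option is a closure that mutates the pod configuration and refuses to run once the pod is finalized. A self-contained sketch of that pattern with hypothetical stand-in types (not libpod's real `Pod`/`PodConfig`):

```
package main

import "fmt"

// Hypothetical stand-ins for libpod's pod types, for illustration only.
type podConfig struct {
	CreateCommand []string
	ConmonPidFile string
}

type pod struct{ config podConfig }

// podCreateOption mirrors libpod.PodCreateOption: a function applied to the
// pod while it is being built.
type podCreateOption func(*pod) error

func withCreateCommand(args []string) podCreateOption {
	return func(p *pod) error {
		p.config.CreateCommand = args
		return nil
	}
}

func withConmonPidFile(path string) podCreateOption {
	return func(p *pod) error {
		p.config.ConmonPidFile = path
		return nil
	}
}

func newPod(opts ...podCreateOption) (*pod, error) {
	p := &pod{}
	for _, opt := range opts {
		if err := opt(p); err != nil {
			return nil, err
		}
	}
	return p, nil
}

func main() {
	p, err := newPod(
		withCreateCommand([]string{"podman", "pod", "create", "--name", "demo"}),
		withConmonPidFile("/tmp/demo-conmon.pid"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p.config)
}
```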
diff --git a/libpod/pod.go b/libpod/pod.go
index 8afaa6052..bf0d7a397 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -64,6 +64,10 @@ type PodConfig struct {
// Time pod was created
CreatedTime time.Time `json:"created"`
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string `json:"CreateCommand,omitempty"`
+
// ID of the pod's lock
LockID uint32 `json:"lockID"`
}
@@ -79,6 +83,7 @@ type podState struct {
// InfraContainerConfig is the configuration for the pod's infra container
type InfraContainerConfig struct {
+ ConmonPidFile string `json:"conmonPidFile"`
HasInfraContainer bool `json:"makeInfraContainer"`
HostNetwork bool `json:"infraHostNetwork,omitempty"`
PortBindings []ocicni.PortMapping `json:"infraPortBindings"`
@@ -124,6 +129,12 @@ func (p *Pod) CreatedTime() time.Time {
return p.config.CreatedTime
}
+// CreateCommand returns the os.Args of the process with which the pod has been
+// created.
+func (p *Pod) CreateCommand() []string {
+ return p.config.CreateCommand
+}
+
// CgroupParent returns the pod's CGroup parent
func (p *Pod) CgroupParent() string {
return p.config.CgroupParent
@@ -246,6 +257,20 @@ func (p *Pod) InfraContainerID() (string, error) {
return p.state.InfraContainerID, nil
}
+// InfraContainer returns the infra container.
+func (p *Pod) InfraContainer() (*Container, error) {
+ if !p.HasInfraContainer() {
+ return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ }
+
+ id, err := p.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+
+ return p.runtime.state.Container(id)
+}
+
// TODO add pod batching
// Lock pod to avoid lock contention
// Store and lock all containers (no RemoveContainer in batch guarantees cache will not become stale)
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index e2c4b515d..c8605eb69 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -489,6 +489,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
Name: p.Name(),
Namespace: p.Namespace(),
Created: p.CreatedTime(),
+ CreateCommand: p.config.CreateCommand,
State: podState,
Hostname: p.config.Hostname,
Labels: p.Labels(),
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 06a7b3936..a0dee3aa1 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -130,6 +130,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm
options = append(options, WithRootFSFromImage(imgID, imgName, rawImageName))
options = append(options, WithName(containerName))
options = append(options, withIsInfra())
+ if len(p.config.InfraContainer.ConmonPidFile) > 0 {
+ options = append(options, WithConmonPidFile(p.config.InfraContainer.ConmonPidFile))
+ }
return r.newContainer(ctx, g.Config, options...)
}
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index b90f3d625..8ce2180ab 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -465,8 +465,11 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
ports := make(nat.PortSet)
for p := range inspect.HostConfig.PortBindings {
- splitp := strings.Split(p, "/")
- port, err := nat.NewPort(splitp[0], splitp[1])
+ splitp := strings.SplitN(p, "/", 2)
+ if len(splitp) != 2 {
+ return nil, errors.Errorf("PORT/PROTOCOL Format required for %q", p)
+ }
+ port, err := nat.NewPort(splitp[1], splitp[0])
if err != nil {
return nil, err
}
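
The argument swap above matters because the keys in `inspect.HostConfig.PortBindings` are of the form `port/protocol`, while `nat.NewPort` takes the protocol first. A short sketch, assuming the `github.com/docker/go-connections/nat` module is available:

```
package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Keys look like "80/tcp": port first, protocol second.
	key := "80/tcp"
	parts := strings.SplitN(key, "/", 2)

	// nat.NewPort takes (proto, port), hence splitp[1] before splitp[0]
	// in the patch above.
	port, err := nat.NewPort(parts[1], parts[0])
	if err != nil {
		panic(err)
	}
	fmt.Println(port) // prints "80/tcp"
}
```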
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 4f485cbee..0823bc64e 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -26,12 +26,18 @@ type PlayKubeOptions struct {
SeccompProfileRoot string
}
-// PlayKubeReport contains the results of running play kube.
-type PlayKubeReport struct {
- // Pod - the ID of the created pod.
- Pod string
+// PlayKubePod represents a single pod and associated containers created by play kube
+type PlayKubePod struct {
+ // ID - ID of the pod created as a result of play kube.
+ ID string
// Containers - the IDs of the containers running in the created pod.
Containers []string
// Logs - non-fatal errors and log messages while processing.
Logs []string
}
+
+// PlayKubeReport contains the results of running play kube.
+type PlayKubeReport struct {
+ // Pods - pods created by play kube.
+ Pods []PlayKubePod
+}
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index a85333c75..fc76ddd41 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -103,15 +103,16 @@ type PodRmReport struct {
}
type PodCreateOptions struct {
- CGroupParent string
- Hostname string
- Infra bool
- InfraImage string
- InfraCommand string
- Labels map[string]string
- Name string
- Net *NetOptions
- Share []string
+ CGroupParent string
+ Hostname string
+ Infra bool
+ InfraImage string
+ InfraCommand string
+ InfraConmonPidFile string
+ Labels map[string]string
+ Name string
+ Net *NetOptions
+ Share []string
}
type PodCreateReport struct {
@@ -127,6 +128,9 @@ func (p PodCreateOptions) ToPodSpecGen(s *specgen.PodSpecGenerator) {
if len(p.InfraCommand) > 0 {
s.InfraCommand = strings.Split(p.InfraCommand, " ")
}
+ if len(p.InfraConmonPidFile) > 0 {
+ s.InfraConmonPidFile = p.InfraConmonPidFile
+ }
s.InfraImage = p.InfraImage
s.SharedNamespaces = p.Share
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index abb5e2911..8853303d5 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
- "strings"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
@@ -16,165 +15,29 @@ import (
)
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
- opts := generate.Options{
- Files: options.Files,
- New: options.New,
- }
-
// First assume it's a container.
- if info, found, err := ic.generateSystemdgenContainerInfo(nameOrID, nil, options); found && err != nil {
- return nil, err
- } else if found && err == nil {
- output, err := generate.CreateContainerSystemdUnit(info, opts)
- if err != nil {
- return nil, err
+ ctr, ctrErr := ic.Libpod.LookupContainer(nameOrID)
+ if ctrErr == nil {
+ // Generate the unit for the container.
+ s, err := generate.ContainerUnit(ctr, options)
+ if err == nil {
+ return &entities.GenerateSystemdReport{Output: s}, nil
}
- return &entities.GenerateSystemdReport{Output: output}, nil
- }
-
- // --new does not support pods.
- if options.New {
- return nil, errors.Errorf("error generating systemd unit files: cannot generate generic files for a pod")
}
- // We're either having a pod or garbage.
+ // If it's not a container, we either have a pod or garbage.
pod, err := ic.Libpod.LookupPod(nameOrID)
if err != nil {
- return nil, err
- }
-
- // Error out if the pod has no infra container, which we require to be the
- // main service.
- if !pod.HasInfraContainer() {
- return nil, fmt.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
+ err = errors.Wrap(ctrErr, err.Error())
+ return nil, errors.Wrapf(err, "%s does not refer to a container or pod", nameOrID)
}
- // Generate a systemdgen.ContainerInfo for the infra container. This
- // ContainerInfo acts as the main service of the pod.
- infraID, err := pod.InfraContainerID()
- if err != nil {
- return nil, nil
- }
- podInfo, _, err := ic.generateSystemdgenContainerInfo(infraID, pod, options)
+ // Generate the units for the pod and all its containers.
+ s, err := generate.PodUnits(pod, options)
if err != nil {
return nil, err
}
-
- // Compute the container-dependency graph for the Pod.
- containers, err := pod.AllContainers()
- if err != nil {
- return nil, err
- }
- if len(containers) == 0 {
- return nil, fmt.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
- }
- graph, err := libpod.BuildContainerGraph(containers)
- if err != nil {
- return nil, err
- }
-
- // Traverse the dependency graph and create systemdgen.ContainerInfo's for
- // each container.
- containerInfos := []*generate.ContainerInfo{podInfo}
- for ctr, dependencies := range graph.DependencyMap() {
- // Skip the infra container as we already generated it.
- if ctr.ID() == infraID {
- continue
- }
- ctrInfo, _, err := ic.generateSystemdgenContainerInfo(ctr.ID(), nil, options)
- if err != nil {
- return nil, err
- }
- // Now add the container's dependencies and at the container as a
- // required service of the infra container.
- for _, dep := range dependencies {
- if dep.ID() == infraID {
- ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName)
- } else {
- _, serviceName := generateServiceName(dep, nil, options)
- ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName)
- }
- }
- podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName)
- containerInfos = append(containerInfos, ctrInfo)
- }
-
- // Now generate the systemd service for all containers.
- builder := strings.Builder{}
- for i, info := range containerInfos {
- if i > 0 {
- builder.WriteByte('\n')
- }
- out, err := generate.CreateContainerSystemdUnit(info, opts)
- if err != nil {
- return nil, err
- }
- builder.WriteString(out)
- }
-
- return &entities.GenerateSystemdReport{Output: builder.String()}, nil
-}
-
-// generateSystemdgenContainerInfo is a helper to generate a
-// systemdgen.ContainerInfo for `GenerateSystemd`.
-func (ic *ContainerEngine) generateSystemdgenContainerInfo(nameOrID string, pod *libpod.Pod, options entities.GenerateSystemdOptions) (*generate.ContainerInfo, bool, error) {
- ctr, err := ic.Libpod.LookupContainer(nameOrID)
- if err != nil {
- return nil, false, err
- }
-
- timeout := ctr.StopTimeout()
- if options.StopTimeout != nil {
- timeout = *options.StopTimeout
- }
-
- config := ctr.Config()
- conmonPidFile := config.ConmonPidFile
- if conmonPidFile == "" {
- return nil, true, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
- }
-
- createCommand := []string{}
- if config.CreateCommand != nil {
- createCommand = config.CreateCommand
- } else if options.New {
- return nil, true, errors.Errorf("cannot use --new on container %q: no create command found", nameOrID)
- }
-
- name, serviceName := generateServiceName(ctr, pod, options)
- info := &generate.ContainerInfo{
- ServiceName: serviceName,
- ContainerName: name,
- RestartPolicy: options.RestartPolicy,
- PIDFile: conmonPidFile,
- StopTimeout: timeout,
- GenerateTimestamp: true,
- CreateCommand: createCommand,
- }
-
- return info, true, nil
-}
-
-// generateServiceName generates the container name and the service name for systemd service.
-func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, string) {
- var kind, name, ctrName string
- if pod == nil {
- kind = options.ContainerPrefix //defaults to container
- name = ctr.ID()
- if options.Name {
- name = ctr.Name()
- }
- ctrName = name
- } else {
- kind = options.PodPrefix //defaults to pod
- name = pod.ID()
- ctrName = ctr.ID()
- if options.Name {
- name = pod.Name()
- ctrName = ctr.Name()
- }
- }
- return ctrName, fmt.Sprintf("%s%s%s", kind, options.Separator, name)
+ return &entities.GenerateSystemdReport{Output: s}, nil
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 932974aba..f5b93c51b 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -26,6 +26,7 @@ import (
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ v1apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
)
@@ -38,13 +39,7 @@ const (
func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
var (
- containers []*libpod.Container
- pod *libpod.Pod
- podOptions []libpod.PodCreateOption
- podYAML v1.Pod
- registryCreds *types.DockerAuthConfig
- writer io.Writer
- report entities.PlayKubeReport
+ kubeObject v1.ObjectReference
)
content, err := ioutil.ReadFile(path)
@@ -52,25 +47,84 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
return nil, err
}
- if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ if err := yaml.Unmarshal(content, &kubeObject); err != nil {
return nil, errors.Wrapf(err, "unable to read %q as YAML", path)
}
// NOTE: pkg/bindings/play is also parsing the file.
// A pkg/kube would be nice to refactor and abstract
// parts of the K8s-related code.
- if podYAML.Kind != "Pod" {
- return nil, errors.Errorf("invalid YAML kind: %q. Pod is the only supported Kubernetes YAML kind", podYAML.Kind)
+ switch kubeObject.Kind {
+ case "Pod":
+ var podYAML v1.Pod
+ var podTemplateSpec v1.PodTemplateSpec
+ if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read YAML %q as Kube Pod", path)
+ }
+ podTemplateSpec.ObjectMeta = podYAML.ObjectMeta
+ podTemplateSpec.Spec = podYAML.Spec
+ return ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options)
+ case "Deployment":
+ var deploymentYAML v1apps.Deployment
+ if err := yaml.Unmarshal(content, &deploymentYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read YAML %q as Kube Deployment", path)
+ }
+ return ic.playKubeDeployment(ctx, &deploymentYAML, options)
+ default:
+ return nil, errors.Errorf("invalid YAML kind: %q. [Pod|Deployment] are the only supported Kubernetes Kinds", kubeObject.Kind)
+ }
+
+}
+
+func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ var (
+ deploymentName string
+ podSpec v1.PodTemplateSpec
+ numReplicas int32
+ i int32
+ report entities.PlayKubeReport
+ )
+
+ deploymentName = deploymentYAML.ObjectMeta.Name
+ if deploymentName == "" {
+ return nil, errors.Errorf("Deployment does not have a name")
}
+ numReplicas = 1
+ if deploymentYAML.Spec.Replicas != nil {
+ numReplicas = *deploymentYAML.Spec.Replicas
+ }
+ podSpec = deploymentYAML.Spec.Template
+
+ // create "replicas" number of pods
+ for i = 0; i < numReplicas; i++ {
+ podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
+ podReport, err := ic.playKubePod(ctx, podName, &podSpec, options)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error encountered while bringing up pod %s", podName)
+ }
+ report.Pods = append(report.Pods, podReport.Pods...)
+ }
+ return &report, nil
+}
+
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ playKubePod entities.PlayKubePod
+ report entities.PlayKubeReport
+ )
// check for name collision between pod and container
- podName := podYAML.ObjectMeta.Name
if podName == "" {
return nil, errors.Errorf("pod does not have a name")
}
for _, n := range podYAML.Spec.Containers {
if n.Name == podName {
- report.Logs = append(report.Logs,
+ playKubePod.Logs = append(playKubePod.Logs,
fmt.Sprintf("a container exists with the same name (%q) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName))
podName = fmt.Sprintf("%s_pod", podName)
}
@@ -239,7 +293,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
if err != nil {
return nil, err
}
@@ -259,11 +313,13 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
}
}
- report.Pod = pod.ID()
+ playKubePod.ID = pod.ID()
for _, ctr := range containers {
- report.Containers = append(report.Containers, ctr.ID())
+ playKubePod.Containers = append(playKubePod.Containers, ctr.ID())
}
+ report.Pods = append(report.Pods, playKubePod)
+
return &report, nil
}
@@ -351,7 +407,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -368,7 +424,14 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
- containerConfig.Name = containerYAML.Name
+
+ // podName should be non-empty for Deployment objects to be able to create
+ // multiple pods having containers with unique names
+ if podName == "" {
+ return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
+ }
+ containerConfig.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
+
containerConfig.Tty = containerYAML.TTY
containerConfig.Pod = podID
@@ -382,7 +445,10 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
setupSecurityContext(&securityConfig, &userConfig, containerYAML)
- securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name)
+ // Since we prefix the container name with the pod name to work around the uniqueness requirement,
+ // the seccomp profile should reference the actual container name from the YAML
+ // but apply to the containers with the prefixed name
+ securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerYAML.Name)
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
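
`playKubeDeployment` above expands a Deployment into `replicas` pods named `<deployment>-pod-<n>`, and `kubeContainerToCreateConfig` prefixes each container with its pod's name so container names stay unique across replicas. A short sketch of the resulting naming scheme (sample names only):

```
package main

import "fmt"

func main() {
	deploymentName := "web"
	numReplicas := int32(2)
	containerNames := []string{"nginx", "sidecar"}

	// Mirrors the naming used above: one pod per replica, each container
	// prefixed with its pod's name.
	for i := int32(0); i < numReplicas; i++ {
		podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
		fmt.Println("pod:", podName)
		for _, name := range containerNames {
			fmt.Printf("  container: %s-%s\n", podName, name)
		}
	}
}
```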
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index eb6f1e191..054b59b06 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -144,6 +144,7 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
var (
reports []*entities.PodStopReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
@@ -199,10 +200,12 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
var (
reports []*entities.PodStartReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
+
for _, p := range pods {
report := entities.PodStartReport{Id: p.ID()}
errs, err := p.Start(ctx)
@@ -227,6 +230,7 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, optio
var (
reports []*entities.PodRmReport
)
+
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index b91dd513d..33ba58558 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -72,11 +72,9 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
return err
}
unitName := fmt.Sprintf("podman-%d.scope", os.Getpid())
- if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
- if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
+ if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
+ if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
logrus.Warnf("Failed to add podman to systemd sandbox cgroup: %v", err)
- } else {
- logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err)
}
}
}
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index cd2d69cfb..5ccb1ba80 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -93,5 +93,9 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithInfraContainerPorts(ports))
}
options = append(options, libpod.WithPodCgroups())
+ options = append(options, libpod.WithPodCreateCommand())
+ if len(p.InfraConmonPidFile) > 0 {
+ options = append(options, libpod.WithInfraConmonPidFile(p.InfraConmonPidFile))
+ }
return options, nil
}
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 11976233a..600d27004 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -25,6 +25,9 @@ type PodBasicConfig struct {
// InfraCommand and InfraImages in this struct.
// Optional.
NoInfra bool `json:"no_infra,omitempty"`
+ // InfraConmonPidFile is a custom path to store the infra container's
+ // conmon PID.
+ InfraConmonPidFile string `json:"infra_conmon_pid_file,omitempty"`
// InfraCommand sets the command that will be used to start the infra
// container.
// If not set, the default set in the Libpod configuration file will be
diff --git a/pkg/systemd/generate/common.go b/pkg/systemd/generate/common.go
new file mode 100644
index 000000000..fe56dc874
--- /dev/null
+++ b/pkg/systemd/generate/common.go
@@ -0,0 +1,50 @@
+package generate
+
+import (
+ "github.com/pkg/errors"
+)
+
+// EnvVariable "PODMAN_SYSTEMD_UNIT" is set in all generated systemd units and
+// is set to the unit's (unique) name.
+const EnvVariable = "PODMAN_SYSTEMD_UNIT"
+
+// restartPolicies includes all valid restart policies to be used in a unit
+// file.
+var restartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"}
+
+// validateRestartPolicy checks that the user-provided policy is valid.
+func validateRestartPolicy(restart string) error {
+ for _, i := range restartPolicies {
+ if i == restart {
+ return nil
+ }
+ }
+ return errors.Errorf("%s is not a valid restart policy", restart)
+}
+
+const headerTemplate = `# {{.ServiceName}}.service
+# autogenerated by Podman {{.PodmanVersion}}
+{{- if .TimeStamp}}
+# {{.TimeStamp}}
+{{- end}}
+
+[Unit]
+Description=Podman {{.ServiceName}}.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+`
+
+// filterPodFlags removes --pod and --pod-id-file from the specified command.
+func filterPodFlags(command []string) []string {
+ processed := []string{}
+ for i := 0; i < len(command); i++ {
+ s := command[i]
+ if s == "--pod" || s == "--pod-id-file" {
+ i++
+ continue
+ }
+ processed = append(processed, s)
+ }
+ return processed
+}
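
To see the shape of the shared unit header, the template can be rendered with `text/template`; the field names match the template above, and the sample values here are illustrative only:

```
package main

import (
	"os"
	"text/template"
)

// Copy of the headerTemplate above, rendered with sample values.
const headerTemplate = `# {{.ServiceName}}.service
# autogenerated by Podman {{.PodmanVersion}}
{{- if .TimeStamp}}
# {{.TimeStamp}}
{{- end}}

[Unit]
Description=Podman {{.ServiceName}}.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target
`

func main() {
	tmpl := template.Must(template.New("header").Parse(headerTemplate))
	err := tmpl.Execute(os.Stdout, struct {
		ServiceName   string
		PodmanVersion string
		TimeStamp     string
	}{"container-foo", "2.0.0-dev", "Mon May  4 10:00:00 CEST 2020"})
	if err != nil {
		panic(err)
	}
}
```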
diff --git a/pkg/systemd/generate/common_test.go b/pkg/systemd/generate/common_test.go
new file mode 100644
index 000000000..f53bb7828
--- /dev/null
+++ b/pkg/systemd/generate/common_test.go
@@ -0,0 +1,25 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFilterPodFlags(t *testing.T) {
+
+ tests := []struct {
+ input []string
+ }{
+ {[]string{"podman", "pod", "create"}},
+ {[]string{"podman", "pod", "create", "--name", "foo"}},
+ {[]string{"podman", "pod", "create", "--pod-id-file", "foo"}},
+ {[]string{"podman", "run", "--pod", "foo"}},
+ }
+
+ for _, test := range tests {
+ processed := filterPodFlags(test.input)
+ assert.NotContains(t, processed, "--pod-id-file")
+ assert.NotContains(t, processed, "--pod")
+ }
+}
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
new file mode 100644
index 000000000..dced1a3da
--- /dev/null
+++ b/pkg/systemd/generate/containers.go
@@ -0,0 +1,289 @@
+package generate
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// containerInfo contains data required for generating a container's systemd
+// unit file.
+type containerInfo struct {
+ // ServiceName of the systemd service.
+ ServiceName string
+ // Name or ID of the container.
+ ContainerNameOrID string
+ // StopTimeout sets the timeout Podman waits before killing the container
+ // during service stop.
+ StopTimeout uint
+ // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
+ RestartPolicy string
+ // PIDFile of the service. Required for forking services. Must point to the
+ // PID of the associated conmon process.
+ PIDFile string
+ // ContainerIDFile to be used in the unit.
+ ContainerIDFile string
+	// GenerateTimestamp, if set, adds a time stamp to the generated unit file.
+ GenerateTimestamp bool
+ // BoundToServices are the services this service binds to. Note that this
+ // service runs after them.
+ BoundToServices []string
+	// PodmanVersion for the header. Will be auto-filled with the current
+	// Podman version if left empty.
+ PodmanVersion string
+ // Executable is the path to the podman executable. Will be auto-filled if
+ // left empty.
+ Executable string
+ // TimeStamp at the time of creating the unit file. Will be set internally.
+ TimeStamp string
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string
+ // EnvVariable is generate.EnvVariable and must not be set.
+ EnvVariable string
+ // ExecStartPre of the unit.
+ ExecStartPre string
+ // ExecStart of the unit.
+ ExecStart string
+ // ExecStop of the unit.
+ ExecStop string
+ // ExecStopPost of the unit.
+ ExecStopPost string
+
+	// If not nil, the container is part of a pod, and we can use the
+	// podInfo to extract the relevant data.
+ pod *podInfo
+}
+
+const containerTemplate = headerTemplate + `
+{{- if .BoundToServices}}
+RefuseManualStart=yes
+RefuseManualStop=yes
+BindsTo={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+After={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+{{- end}}
+
+[Service]
+Environment={{.EnvVariable}}=%n
+Restart={{.RestartPolicy}}
+{{- if .ExecStartPre}}
+ExecStartPre={{.ExecStartPre}}
+{{- end}}
+ExecStart={{.ExecStart}}
+ExecStop={{.ExecStop}}
+{{- if .ExecStopPost}}
+ExecStopPost={{.ExecStopPost}}
+{{- end}}
+PIDFile={{.PIDFile}}
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+// ContainerUnit generates a systemd unit for the specified container. Based
+// on the options, the return value is either the content of the unit or the
+// path of the file it has been written to.
+func ContainerUnit(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, error) {
+ info, err := generateContainerInfo(ctr, options)
+ if err != nil {
+ return "", err
+ }
+ return executeContainerTemplate(info, options)
+}
+
+func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSystemdOptions) (*containerInfo, error) {
+ timeout := ctr.StopTimeout()
+ if options.StopTimeout != nil {
+ timeout = *options.StopTimeout
+ }
+
+ config := ctr.Config()
+ conmonPidFile := config.ConmonPidFile
+ if conmonPidFile == "" {
+ return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ }
+
+ createCommand := []string{}
+ if config.CreateCommand != nil {
+ createCommand = config.CreateCommand
+ } else if options.New {
+ return nil, errors.Errorf("cannot use --new on container %q: no create command found", ctr.ID())
+ }
+
+ nameOrID, serviceName := containerServiceName(ctr, options)
+
+ info := containerInfo{
+ ServiceName: serviceName,
+ ContainerNameOrID: nameOrID,
+ RestartPolicy: options.RestartPolicy,
+ PIDFile: conmonPidFile,
+ StopTimeout: timeout,
+ GenerateTimestamp: true,
+ CreateCommand: createCommand,
+ }
+
+ return &info, nil
+}
+
+// containerServiceName returns the nameOrID and the service name of the
+// container.
+func containerServiceName(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, string) {
+ nameOrID := ctr.ID()
+ if options.Name {
+ nameOrID = ctr.Name()
+ }
+ serviceName := fmt.Sprintf("%s%s%s", options.ContainerPrefix, options.Separator, nameOrID)
+ return nameOrID, serviceName
+}
+
+// executeContainerTemplate executes the container template on the specified
+// containerInfo. Note that the containerInfo is also post-processed and
+// completed, which allows for easier unit testing.
+func executeContainerTemplate(info *containerInfo, options entities.GenerateSystemdOptions) (string, error) {
+ if err := validateRestartPolicy(info.RestartPolicy); err != nil {
+ return "", err
+ }
+
+ // Make sure the executable is set.
+ if info.Executable == "" {
+ executable, err := os.Executable()
+ if err != nil {
+ executable = "/usr/bin/podman"
+ logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
+ }
+ info.Executable = executable
+ }
+
+ info.EnvVariable = EnvVariable
+ info.ExecStart = "{{.Executable}} start {{.ContainerNameOrID}}"
+ info.ExecStop = "{{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.ContainerNameOrID}}"
+
+ // Assemble the ExecStart command when creating a new container.
+ //
+	// Note that we cannot catch all corner cases here, so users
+	// *must* manually check the generated files. A container might have
+	// been created via a Python script, which would certainly yield an
+	// invalid `info.CreateCommand`. Hence, we do a best-effort unit
+	// generation and do not aim at completeness.
+ if options.New {
+ info.PIDFile = "%t/" + info.ServiceName + ".pid"
+ info.ContainerIDFile = "%t/" + info.ServiceName + ".ctr-id"
+		// The create command must have at least three arguments:
+ // /usr/bin/podman run $IMAGE
+ index := 2
+ if info.CreateCommand[1] == "container" {
+ index = 3
+ }
+ if len(info.CreateCommand) < index+1 {
+ return "", errors.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
+ }
+		// We hard-code the leading arguments and then append the
+		// CreateCommand with its command and subcommand stripped.
+ startCommand := []string{
+ info.Executable,
+ "run",
+ "--conmon-pidfile", "{{.PIDFile}}",
+ "--cidfile", "{{.ContainerIDFile}}",
+ "--cgroups=no-conmon",
+ }
+ // If the container is in a pod, make sure that the
+ // --pod-id-file is set correctly.
+ if info.pod != nil {
+ podFlags := []string{"--pod-id-file", info.pod.PodIDFile}
+ startCommand = append(startCommand, podFlags...)
+ info.CreateCommand = filterPodFlags(info.CreateCommand)
+ }
+
+		// Enforce detaching.
+		//
+		// Since we use a systemd `Type=forking` service
+		// (see https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=),
+		// a unit generated with the --new param has an `ExecStart` of the
+		// form `/usr/bin/podman run ...`. If `info.CreateCommand` carries
+		// no `-d` or `--detach` param, podman runs the container in the
+		// default attached mode; `systemctl start` would then wait for the
+		// `podman run` command to exit and eventually fail with a timeout error.
+ hasDetachParam := false
+ for _, p := range info.CreateCommand[index:] {
+ if p == "--detach" || p == "-d" {
+ hasDetachParam = true
+ }
+ }
+ if !hasDetachParam {
+ startCommand = append(startCommand, "-d")
+ }
+ startCommand = append(startCommand, info.CreateCommand[index:]...)
+
+ info.ExecStartPre = "/usr/bin/rm -f {{.PIDFile}} {{.ContainerIDFile}}"
+ info.ExecStart = strings.Join(startCommand, " ")
+ info.ExecStop = "{{.Executable}} stop --ignore --cidfile {{.ContainerIDFile}} {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}"
+ info.ExecStopPost = "{{.Executable}} rm --ignore -f --cidfile {{.ContainerIDFile}}"
+ }
+
+ if info.PodmanVersion == "" {
+ info.PodmanVersion = version.Version
+ }
+ if info.GenerateTimestamp {
+ info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
+ }
+
+	// Sort the slices to ensure a deterministic output.
+ sort.Strings(info.BoundToServices)
+
+ // Generate the template and compile it.
+ //
+ // Note that we need a two-step generation process to allow for fields
+ // embedding other fields. This way we can replace `A -> B -> C` and
+ // make the code easier to maintain at the cost of a slightly slower
+ // generation. That's especially needed for embedding the PID and ID
+ // files in other fields which will eventually get replaced in the 2nd
+ // template execution.
+ templ, err := template.New("container_template").Parse(containerTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ var buf bytes.Buffer
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ // Now parse the generated template (i.e., buf) and execute it.
+ templ, err = template.New("container_template").Parse(buf.String())
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ buf = bytes.Buffer{}
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ if !options.Files {
+ return buf.String(), nil
+ }
+
+ buf.WriteByte('\n')
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrap(err, "error getting current working directory")
+ }
+ path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
+ if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
+ return "", errors.Wrap(err, "error generating systemd unit")
+ }
+ return path, nil
+}
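
A note on the two-step template execution used in executeContainerTemplate above: the first pass expands fields such as ExecStart, whose values may themselves embed references like {{.PIDFile}}; the second pass parses that intermediate output and resolves the embedded references. A minimal sketch of the technique with hypothetical field values, not the real containerInfo:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

type unitInfo struct {
	PIDFile   string
	ExecStart string
}

func main() {
	info := unitInfo{
		PIDFile: "%t/demo.pid",
		// ExecStart itself still contains a template reference.
		ExecStart: "/usr/bin/podman run --conmon-pidfile {{.PIDFile}} alpine top",
	}
	const unit = "ExecStart={{.ExecStart}}\nPIDFile={{.PIDFile}}\n"

	// First pass: expands .ExecStart and .PIDFile, but the expansion of
	// .ExecStart still carries the literal "{{.PIDFile}}".
	var buf bytes.Buffer
	if err := template.Must(template.New("unit").Parse(unit)).Execute(&buf, info); err != nil {
		panic(err)
	}

	// Second pass: parse the intermediate result and execute it again so
	// the embedded reference is resolved as well.
	var out bytes.Buffer
	if err := template.Must(template.New("unit").Parse(buf.String())).Execute(&out, info); err != nil {
		panic(err)
	}
	fmt.Print(out.String())
	// ExecStart=/usr/bin/podman run --conmon-pidfile %t/demo.pid alpine top
	// PIDFile=%t/demo.pid
}
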
diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go
new file mode 100644
index 000000000..8365ecd7a
--- /dev/null
+++ b/pkg/systemd/generate/containers_test.go
@@ -0,0 +1,366 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func TestValidateRestartPolicyContainer(t *testing.T) {
+ type containerInfo struct {
+ restart string
+ }
+ tests := []struct {
+ name string
+ containerInfo containerInfo
+ wantErr bool
+ }{
+ {"good-on", containerInfo{restart: "no"}, false},
+ {"good-on-success", containerInfo{restart: "on-success"}, false},
+ {"good-on-failure", containerInfo{restart: "on-failure"}, false},
+ {"good-on-abnormal", containerInfo{restart: "on-abnormal"}, false},
+ {"good-on-watchdog", containerInfo{restart: "on-watchdog"}, false},
+ {"good-on-abort", containerInfo{restart: "on-abort"}, false},
+ {"good-always", containerInfo{restart: "always"}, false},
+ {"fail", containerInfo{restart: "foobar"}, true},
+ {"failblank", containerInfo{restart: ""}, true},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateRestartPolicy(test.containerInfo.restart); (err != nil) != test.wantErr {
+ t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
+ }
+ })
+ }
+}
+
+func TestCreateContainerSystemdUnit(t *testing.T) {
+ goodID := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
+ExecStop=/usr/bin/podman stop -t 10 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodName := `# container-foobar.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-foobar.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start foobar
+ExecStop=/usr/bin/podman stop -t 10 foobar
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameBoundTo := `# container-foobar.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-foobar.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+RefuseManualStart=yes
+RefuseManualStop=yes
+BindsTo=a.service b.service c.service pod.service
+After=a.service b.service c.service pod.service
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start foobar
+ExecStop=/usr/bin/podman stop -t 10 foobar
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameNew := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodNameNewWithPodFile := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --pod-id-file /tmp/pod-foobar.pod-id-file -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+ goodNameNewDetach := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 42
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id
+PIDFile=%t/jadda-jadda.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ goodIDNew := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStartPre=/usr/bin/rm -f %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
+ExecStart=/usr/bin/podman run --conmon-pidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id --cgroups=no-conmon -d awesome-image:latest
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id -t 10
+ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.ctr-id
+PIDFile=%t/container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ tests := []struct {
+ name string
+ info containerInfo
+ want string
+ new bool
+ wantErr bool
+ }{
+
+ {"good with id",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ ContainerNameOrID: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ goodID,
+ false,
+ false,
+ },
+ {"good with name",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-foobar",
+ ContainerNameOrID: "foobar",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ goodName,
+ false,
+ false,
+ },
+ {"good with name and bound to",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-foobar",
+ ContainerNameOrID: "foobar",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ BoundToServices: []string{"pod", "a", "b", "c"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameBoundTo,
+ false,
+ false,
+ },
+ {"bad restart policy",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "never",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ EnvVariable: EnvVariable,
+ },
+ "",
+ false,
+ true,
+ },
+ {"good with name and generic",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNew,
+ true,
+ false,
+ },
+ {"good with explicit short detach param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNew,
+ true,
+ false,
+ },
+ {"good with explicit short detach param and podInfo",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ pod: &podInfo{
+ PodIDFile: "/tmp/pod-foobar.pod-id-file",
+ },
+ },
+ goodNameNewWithPodFile,
+ true,
+ false,
+ },
+ {"good with explicit full detach param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "jadda-jadda",
+ ContainerNameOrID: "jadda-jadda",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 42,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "--detach", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
+ EnvVariable: EnvVariable,
+ },
+ goodNameNewDetach,
+ true,
+ false,
+ },
+ {"good with id and no param",
+ containerInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ ContainerNameOrID: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ CreateCommand: []string{"I'll get stripped", "container", "run", "awesome-image:latest"},
+ EnvVariable: EnvVariable,
+ },
+ goodIDNew,
+ true,
+ false,
+ },
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ opts := entities.GenerateSystemdOptions{
+ Files: false,
+ New: test.new,
+ }
+ got, err := executeContainerTemplate(&test.info, opts)
+ if (err != nil) != test.wantErr {
+ t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
+ return
+ }
+ if got != test.want {
+ t.Errorf("CreateContainerSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
+ }
+ })
+ }
+}
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
new file mode 100644
index 000000000..5cfd5ab0a
--- /dev/null
+++ b/pkg/systemd/generate/pods.go
@@ -0,0 +1,341 @@
+package generate
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// podInfo contains data required for generating a pod's systemd
+// unit file.
+type podInfo struct {
+ // ServiceName of the systemd service.
+ ServiceName string
+ // Name or ID of the infra container.
+ InfraNameOrID string
+ // StopTimeout sets the timeout Podman waits before killing the container
+ // during service stop.
+ StopTimeout uint
+ // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
+ RestartPolicy string
+ // PIDFile of the service. Required for forking services. Must point to the
+ // PID of the associated conmon process.
+ PIDFile string
+ // PodIDFile of the unit.
+ PodIDFile string
+	// GenerateTimestamp, if set, adds a time stamp to the generated unit file.
+ GenerateTimestamp bool
+ // RequiredServices are services this service requires. Note that this
+ // service runs before them.
+ RequiredServices []string
+	// PodmanVersion for the header. Will be auto-filled with the current
+	// Podman version if left empty.
+ PodmanVersion string
+ // Executable is the path to the podman executable. Will be auto-filled if
+ // left empty.
+ Executable string
+ // TimeStamp at the time of creating the unit file. Will be set internally.
+ TimeStamp string
+ // CreateCommand is the full command plus arguments of the process the
+ // container has been created with.
+ CreateCommand []string
+ // PodCreateCommand - a post-processed variant of CreateCommand to use
+ // when creating the pod.
+ PodCreateCommand string
+ // EnvVariable is generate.EnvVariable and must not be set.
+ EnvVariable string
+ // ExecStartPre1 of the unit.
+ ExecStartPre1 string
+ // ExecStartPre2 of the unit.
+ ExecStartPre2 string
+ // ExecStart of the unit.
+ ExecStart string
+ // ExecStop of the unit.
+ ExecStop string
+ // ExecStopPost of the unit.
+ ExecStopPost string
+}
+
+const podTemplate = headerTemplate + `Requires={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
+
+[Service]
+Environment={{.EnvVariable}}=%n
+Restart={{.RestartPolicy}}
+{{- if .ExecStartPre1}}
+ExecStartPre={{.ExecStartPre1}}
+{{- end}}
+{{- if .ExecStartPre2}}
+ExecStartPre={{.ExecStartPre2}}
+{{- end}}
+ExecStart={{.ExecStart}}
+ExecStop={{.ExecStop}}
+{{- if .ExecStopPost}}
+ExecStopPost={{.ExecStopPost}}
+{{- end}}
+PIDFile={{.PIDFile}}
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+// PodUnits generates systemd units for the specified pod and its containers.
+// Based on the options, the return value is either the content of all units
+// or the paths of the files they have been written to.
+func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, error) {
+ // Error out if the pod has no infra container, which we require to be the
+ // main service.
+ if !pod.HasInfraContainer() {
+ return "", errors.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
+ }
+
+ podInfo, err := generatePodInfo(pod, options)
+ if err != nil {
+ return "", err
+ }
+
+ infraID, err := pod.InfraContainerID()
+ if err != nil {
+ return "", err
+ }
+
+ // Compute the container-dependency graph for the Pod.
+ containers, err := pod.AllContainers()
+ if err != nil {
+ return "", err
+ }
+ if len(containers) == 0 {
+ return "", errors.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
+ }
+ graph, err := libpod.BuildContainerGraph(containers)
+ if err != nil {
+ return "", err
+ }
+
+	// Traverse the dependency graph and create a containerInfo for each
+	// container.
+ containerInfos := []*containerInfo{}
+ for ctr, dependencies := range graph.DependencyMap() {
+ // Skip the infra container as we already generated it.
+ if ctr.ID() == infraID {
+ continue
+ }
+ ctrInfo, err := generateContainerInfo(ctr, options)
+ if err != nil {
+ return "", err
+ }
+		// Now add the container's dependencies and add the container as a
+		// required service of the infra container.
+ for _, dep := range dependencies {
+ if dep.ID() == infraID {
+ ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName)
+ } else {
+ _, serviceName := containerServiceName(dep, options)
+ ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName)
+ }
+ }
+ podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName)
+ containerInfos = append(containerInfos, ctrInfo)
+ }
+
+	// Now generate the systemd units for the pod and all its containers.
+ builder := strings.Builder{}
+ out, err := executePodTemplate(podInfo, options)
+ if err != nil {
+ return "", err
+ }
+ builder.WriteString(out)
+ for _, info := range containerInfos {
+ info.pod = podInfo
+ builder.WriteByte('\n')
+ out, err := executeContainerTemplate(info, options)
+ if err != nil {
+ return "", err
+ }
+ builder.WriteString(out)
+ }
+
+ return builder.String(), nil
+}
+
+func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (*podInfo, error) {
+	// Collect the data of the pod's infra container; it acts as the main
+	// service of the pod.
+ infraCtr, err := pod.InfraContainer()
+ if err != nil {
+ return nil, errors.Wrap(err, "could not find infra container")
+ }
+
+ timeout := infraCtr.StopTimeout()
+ if options.StopTimeout != nil {
+ timeout = *options.StopTimeout
+ }
+
+ config := infraCtr.Config()
+ conmonPidFile := config.ConmonPidFile
+ if conmonPidFile == "" {
+ return nil, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ }
+
+ createCommand := pod.CreateCommand()
+ if options.New && len(createCommand) == 0 {
+ return nil, errors.Errorf("cannot use --new on pod %q: no create command found", pod.ID())
+ }
+
+ nameOrID := pod.ID()
+ ctrNameOrID := infraCtr.ID()
+ if options.Name {
+ nameOrID = pod.Name()
+ ctrNameOrID = infraCtr.Name()
+ }
+ serviceName := fmt.Sprintf("%s%s%s", options.PodPrefix, options.Separator, nameOrID)
+
+ info := podInfo{
+ ServiceName: serviceName,
+ InfraNameOrID: ctrNameOrID,
+ RestartPolicy: options.RestartPolicy,
+ PIDFile: conmonPidFile,
+ StopTimeout: timeout,
+ GenerateTimestamp: true,
+ CreateCommand: createCommand,
+ }
+ return &info, nil
+}
+
+// executePodTemplate executes the pod template on the specified podInfo. Note
+// that the podInfo is also post-processed and completed, which allows for
+// easier unit testing.
+func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions) (string, error) {
+ if err := validateRestartPolicy(info.RestartPolicy); err != nil {
+ return "", err
+ }
+
+ // Make sure the executable is set.
+ if info.Executable == "" {
+ executable, err := os.Executable()
+ if err != nil {
+ executable = "/usr/bin/podman"
+ logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
+ }
+ info.Executable = executable
+ }
+
+ info.EnvVariable = EnvVariable
+ info.ExecStart = "{{.Executable}} start {{.InfraNameOrID}}"
+ info.ExecStop = "{{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.InfraNameOrID}}"
+
+ // Assemble the ExecStart command when creating a new pod.
+ //
+	// Note that we cannot catch all corner cases here, so users
+	// *must* manually check the generated files. A pod might have been
+	// created via a Python script, which would certainly yield an invalid
+	// `info.CreateCommand`. Hence, we do a best-effort unit
+	// generation and do not aim at completeness.
+ if options.New {
+ info.PIDFile = "%t/" + info.ServiceName + ".pid"
+ info.PodIDFile = "%t/" + info.ServiceName + ".pod-id"
+
+ podCreateIndex := 0
+ var podRootArgs, podCreateArgs []string
+ switch len(info.CreateCommand) {
+ case 0, 1, 2:
+ return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ default:
+			// Make sure that the pod was created with `pod create` and
+			// not something else, such as `run --pod new`.
+ for i := 1; i < len(info.CreateCommand); i++ {
+ if info.CreateCommand[i-1] == "pod" && info.CreateCommand[i] == "create" {
+ podCreateIndex = i
+ break
+ }
+ }
+ if podCreateIndex == 0 {
+ return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand)
+ }
+			podRootArgs = info.CreateCommand[1 : podCreateIndex-1]
+ podCreateArgs = filterPodFlags(info.CreateCommand[podCreateIndex+1:])
+ }
+		// We hard-code the leading arguments and then append the
+		// filtered pod-create arguments from the original CreateCommand.
+ startCommand := []string{info.Executable}
+ startCommand = append(startCommand, podRootArgs...)
+ startCommand = append(startCommand,
+ []string{"pod", "create",
+ "--infra-conmon-pidfile", "{{.PIDFile}}",
+ "--pod-id-file", "{{.PodIDFile}}"}...)
+
+ startCommand = append(startCommand, podCreateArgs...)
+
+ info.ExecStartPre1 = "/usr/bin/rm -f {{.PIDFile}} {{.PodIDFile}}"
+ info.ExecStartPre2 = strings.Join(startCommand, " ")
+ info.ExecStart = "{{.Executable}} pod start --pod-id-file {{.PodIDFile}}"
+ info.ExecStop = "{{.Executable}} pod stop --ignore --pod-id-file {{.PodIDFile}} {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}"
+ info.ExecStopPost = "{{.Executable}} pod rm --ignore -f --pod-id-file {{.PodIDFile}}"
+ }
+ if info.PodmanVersion == "" {
+ info.PodmanVersion = version.Version
+ }
+ if info.GenerateTimestamp {
+ info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
+ }
+
+	// Sort the slices to ensure a deterministic output.
+ sort.Strings(info.RequiredServices)
+
+ // Generate the template and compile it.
+ //
+ // Note that we need a two-step generation process to allow for fields
+ // embedding other fields. This way we can replace `A -> B -> C` and
+ // make the code easier to maintain at the cost of a slightly slower
+ // generation. That's especially needed for embedding the PID and ID
+ // files in other fields which will eventually get replaced in the 2nd
+ // template execution.
+ templ, err := template.New("pod_template").Parse(podTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ var buf bytes.Buffer
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ // Now parse the generated template (i.e., buf) and execute it.
+ templ, err = template.New("pod_template").Parse(buf.String())
+ if err != nil {
+ return "", errors.Wrap(err, "error parsing systemd service template")
+ }
+
+ buf = bytes.Buffer{}
+ if err := templ.Execute(&buf, info); err != nil {
+ return "", err
+ }
+
+ if !options.Files {
+ return buf.String(), nil
+ }
+
+ buf.WriteByte('\n')
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrap(err, "error getting current working directory")
+ }
+ path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
+ if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
+ return "", errors.Wrap(err, "error generating systemd unit")
+ }
+ return path, nil
+}
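
To illustrate the --new branch of executePodTemplate: the recorded create command is split at the `pod create` subcommand into global root flags (kept in front) and the pod-create arguments, and the generator injects its own --infra-conmon-pidfile and --pod-id-file before re-assembling the start command. A standalone sketch of that slicing with a hypothetical recorded command (the real code additionally runs filterPodFlags over the trailing arguments):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical command a pod was created with; "--log-level=debug" is a
	// global root flag that must stay in front of "pod create".
	createCommand := []string{"podman", "--log-level=debug", "pod", "create", "--name", "foo"}

	// Locate the "pod create" subcommand, as executePodTemplate does.
	podCreateIndex := 0
	for i := 1; i < len(createCommand); i++ {
		if createCommand[i-1] == "pod" && createCommand[i] == "create" {
			podCreateIndex = i
			break
		}
	}
	podRootArgs := createCommand[1 : podCreateIndex-1] // ["--log-level=debug"]
	podCreateArgs := createCommand[podCreateIndex+1:]  // ["--name", "foo"]

	// Re-assemble the start command with the unit's own PID and pod-ID files.
	start := append([]string{"/usr/bin/podman"}, podRootArgs...)
	start = append(start,
		"pod", "create",
		"--infra-conmon-pidfile", "%t/pod-foo.pid",
		"--pod-id-file", "%t/pod-foo.pod-id")
	start = append(start, podCreateArgs...)

	fmt.Println(strings.Join(start, " "))
	// /usr/bin/podman --log-level=debug pod create --infra-conmon-pidfile %t/pod-foo.pid --pod-id-file %t/pod-foo.pod-id --name foo
}
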
diff --git a/pkg/systemd/generate/pods_test.go b/pkg/systemd/generate/pods_test.go
new file mode 100644
index 000000000..f6e225c35
--- /dev/null
+++ b/pkg/systemd/generate/pods_test.go
@@ -0,0 +1,100 @@
+package generate
+
+import (
+ "testing"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func TestValidateRestartPolicyPod(t *testing.T) {
+ type podInfo struct {
+ restart string
+ }
+ tests := []struct {
+ name string
+ podInfo podInfo
+ wantErr bool
+ }{
+ {"good-on", podInfo{restart: "no"}, false},
+ {"good-on-success", podInfo{restart: "on-success"}, false},
+ {"good-on-failure", podInfo{restart: "on-failure"}, false},
+ {"good-on-abnormal", podInfo{restart: "on-abnormal"}, false},
+ {"good-on-watchdog", podInfo{restart: "on-watchdog"}, false},
+ {"good-on-abort", podInfo{restart: "on-abort"}, false},
+ {"good-always", podInfo{restart: "always"}, false},
+ {"fail", podInfo{restart: "foobar"}, true},
+ {"failblank", podInfo{restart: ""}, true},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateRestartPolicy(test.podInfo.restart); (err != nil) != test.wantErr {
+ t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
+ }
+ })
+ }
+}
+
+func TestCreatePodSystemdUnit(t *testing.T) {
+ podGoodName := `# pod-123abc.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman pod-123abc.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+Requires=container-1.service container-2.service
+Before=container-1.service container-2.service
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman start jadda-jadda-infra
+ExecStop=/usr/bin/podman stop -t 10 jadda-jadda-infra
+PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target`
+
+ tests := []struct {
+ name string
+ info podInfo
+ want string
+ wantErr bool
+ }{
+ {"pod",
+ podInfo{
+ Executable: "/usr/bin/podman",
+ ServiceName: "pod-123abc",
+ InfraNameOrID: "jadda-jadda-infra",
+ RestartPolicy: "always",
+ PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+ StopTimeout: 10,
+ PodmanVersion: "CI",
+ RequiredServices: []string{"container-1", "container-2"},
+ },
+ podGoodName,
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ test := tt
+ t.Run(tt.name, func(t *testing.T) {
+ opts := entities.GenerateSystemdOptions{
+ Files: false,
+ }
+ got, err := executePodTemplate(&test.info, opts)
+ if (err != nil) != test.wantErr {
+ t.Errorf("CreatePodSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
+ return
+ }
+ if got != test.want {
+ t.Errorf("CreatePodSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
+ }
+ })
+ }
+}
diff --git a/pkg/systemd/generate/systemdgen.go b/pkg/systemd/generate/systemdgen.go
deleted file mode 100644
index 73fe52c0e..000000000
--- a/pkg/systemd/generate/systemdgen.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package generate
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "text/template"
- "time"
-
- "github.com/containers/libpod/version"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// EnvVariable "PODMAN_SYSTEMD_UNIT" is set in all generated systemd units and
-// is set to the unit's (unique) name.
-const EnvVariable = "PODMAN_SYSTEMD_UNIT"
-
-// ContainerInfo contains data required for generating a container's systemd
-// unit file.
-type ContainerInfo struct {
- // ServiceName of the systemd service.
- ServiceName string
- // Name or ID of the container.
- ContainerName string
- // InfraContainer of the pod.
- InfraContainer string
- // StopTimeout sets the timeout Podman waits before killing the container
- // during service stop.
- StopTimeout uint
- // RestartPolicy of the systemd unit (e.g., no, on-failure, always).
- RestartPolicy string
- // PIDFile of the service. Required for forking services. Must point to the
- // PID of the associated conmon process.
- PIDFile string
- // GenerateTimestamp, if set the generated unit file has a time stamp.
- GenerateTimestamp bool
- // BoundToServices are the services this service binds to. Note that this
- // service runs after them.
- BoundToServices []string
- // RequiredServices are services this service requires. Note that this
- // service runs before them.
- RequiredServices []string
- // PodmanVersion for the header. Will be set internally. Will be auto-filled
- // if left empty.
- PodmanVersion string
- // Executable is the path to the podman executable. Will be auto-filled if
- // left empty.
- Executable string
- // TimeStamp at the time of creating the unit file. Will be set internally.
- TimeStamp string
- // New controls if a new container is created or if an existing one is started.
- New bool
- // CreateCommand is the full command plus arguments of the process the
- // container has been created with.
- CreateCommand []string
- // RunCommand is a post-processed variant of CreateCommand and used for
- // the ExecStart field in generic unit files.
- RunCommand string
- // EnvVariable is generate.EnvVariable and must not be set.
- EnvVariable string
-}
-
-var restartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"}
-
-// validateRestartPolicy checks that the user-provided policy is valid.
-func validateRestartPolicy(restart string) error {
- for _, i := range restartPolicies {
- if i == restart {
- return nil
- }
- }
- return errors.Errorf("%s is not a valid restart policy", restart)
-}
-
-const containerTemplate = `# {{.ServiceName}}.service
-# autogenerated by Podman {{.PodmanVersion}}
-{{- if .TimeStamp}}
-# {{.TimeStamp}}
-{{- end}}
-
-[Unit]
-Description=Podman {{.ServiceName}}.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-{{- if .BoundToServices}}
-RefuseManualStart=yes
-RefuseManualStop=yes
-BindsTo={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-After={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-{{- end}}
-{{- if .RequiredServices}}
-Requires={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}}
-{{- end}}
-
-[Service]
-Environment={{.EnvVariable}}=%n
-Restart={{.RestartPolicy}}
-{{- if .New}}
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart={{.RunCommand}}
-ExecStop={{.Executable}} stop --ignore --cidfile %t/%n-cid {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}}
-ExecStopPost={{.Executable}} rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-{{- else}}
-ExecStart={{.Executable}} start {{.ContainerName}}
-ExecStop={{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.ContainerName}}
-PIDFile={{.PIDFile}}
-{{- end}}
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
-// Options include different options to control the unit file generation.
-type Options struct {
- // When set, generate service files in the current working directory and
- // return the paths to these files instead of returning all contents in one
- // big string.
- Files bool
- // New controls if a new container is created or if an existing one is started.
- New bool
-}
-
-// CreateContainerSystemdUnit creates a systemd unit file for a container.
-func CreateContainerSystemdUnit(info *ContainerInfo, opts Options) (string, error) {
- if err := validateRestartPolicy(info.RestartPolicy); err != nil {
- return "", err
- }
-
- // Make sure the executable is set.
- if info.Executable == "" {
- executable, err := os.Executable()
- if err != nil {
- executable = "/usr/bin/podman"
- logrus.Warnf("Could not obtain podman executable location, using default %s", executable)
- }
- info.Executable = executable
- }
-
- info.EnvVariable = EnvVariable
-
- // Assemble the ExecStart command when creating a new container.
- //
- // Note that we cannot catch all corner cases here such that users
- // *must* manually check the generated files. A container might have
- // been created via a Python script, which would certainly yield an
- // invalid `info.CreateCommand`. Hence, we're doing a best effort unit
- // generation and don't try aiming at completeness.
- if opts.New {
- // The create command must at least have three arguments:
- // /usr/bin/podman run $IMAGE
- index := 2
- if info.CreateCommand[1] == "container" {
- index = 3
- }
- if len(info.CreateCommand) < index+1 {
- return "", errors.Errorf("container's create command is too short or invalid: %v", info.CreateCommand)
- }
- // We're hard-coding the first five arguments and append the
- // CreateCommand with a stripped command and subcomand.
- command := []string{
- info.Executable,
- "run",
- "--conmon-pidfile", "%t/%n-pid",
- "--cidfile", "%t/%n-cid",
- "--cgroups=no-conmon",
- }
-
- // Enforce detaching
- //
- // since we use systemd `Type=forking` service
- // @see https://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
- // when we generated systemd service file with the --new param,
- // `ExecStart` will have `/usr/bin/podman run ...`
- // if `info.CreateCommand` has no `-d` or `--detach` param,
- // podman will run the container in default attached mode,
- // as a result, `systemd start` will wait the `podman run` command exit until failed with timeout error.
- hasDetachParam := false
- for _, p := range info.CreateCommand[index:] {
- if p == "--detach" || p == "-d" {
- hasDetachParam = true
- }
- }
- if !hasDetachParam {
- command = append(command, "-d")
- }
-
- command = append(command, info.CreateCommand[index:]...)
- info.RunCommand = strings.Join(command, " ")
- info.New = true
- }
-
- if info.PodmanVersion == "" {
- info.PodmanVersion = version.Version
- }
- if info.GenerateTimestamp {
- info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate))
- }
-
- // Sort the slices to assure a deterministic output.
- sort.Strings(info.RequiredServices)
- sort.Strings(info.BoundToServices)
-
- // Generate the template and compile it.
- templ, err := template.New("systemd_service_file").Parse(containerTemplate)
- if err != nil {
- return "", errors.Wrap(err, "error parsing systemd service template")
- }
-
- var buf bytes.Buffer
- if err := templ.Execute(&buf, info); err != nil {
- return "", err
- }
-
- if !opts.Files {
- return buf.String(), nil
- }
-
- buf.WriteByte('\n')
- cwd, err := os.Getwd()
- if err != nil {
- return "", errors.Wrap(err, "error getting current working directory")
- }
- path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
- if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
- return "", errors.Wrap(err, "error generating systemd unit")
- }
- return path, nil
-}
diff --git a/pkg/systemd/generate/systemdgen_test.go b/pkg/systemd/generate/systemdgen_test.go
deleted file mode 100644
index cc5db5e24..000000000
--- a/pkg/systemd/generate/systemdgen_test.go
+++ /dev/null
@@ -1,347 +0,0 @@
-package generate
-
-import (
- "testing"
-)
-
-func TestValidateRestartPolicy(t *testing.T) {
- type ContainerInfo struct {
- restart string
- }
- tests := []struct {
- name string
- ContainerInfo ContainerInfo
- wantErr bool
- }{
- {"good-on", ContainerInfo{restart: "no"}, false},
- {"good-on-success", ContainerInfo{restart: "on-success"}, false},
- {"good-on-failure", ContainerInfo{restart: "on-failure"}, false},
- {"good-on-abnormal", ContainerInfo{restart: "on-abnormal"}, false},
- {"good-on-watchdog", ContainerInfo{restart: "on-watchdog"}, false},
- {"good-on-abort", ContainerInfo{restart: "on-abort"}, false},
- {"good-always", ContainerInfo{restart: "always"}, false},
- {"fail", ContainerInfo{restart: "foobar"}, true},
- {"failblank", ContainerInfo{restart: ""}, true},
- }
- for _, tt := range tests {
- test := tt
- t.Run(tt.name, func(t *testing.T) {
- if err := validateRestartPolicy(test.ContainerInfo.restart); (err != nil) != test.wantErr {
- t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, test.wantErr)
- }
- })
- }
-}
-
-func TestCreateContainerSystemdUnit(t *testing.T) {
- goodID := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
-ExecStop=/usr/bin/podman stop -t 10 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodName := `# container-foobar.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-foobar.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start foobar
-ExecStop=/usr/bin/podman stop -t 10 foobar
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameBoundTo := `# container-foobar.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-foobar.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-RefuseManualStart=yes
-RefuseManualStop=yes
-BindsTo=a.service b.service c.service pod.service
-After=a.service b.service c.service pod.service
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start foobar
-ExecStop=/usr/bin/podman stop -t 10 foobar
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- podGoodName := `# pod-123abc.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman pod-123abc.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-Requires=container-1.service container-2.service
-Before=container-1.service container-2.service
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStart=/usr/bin/podman start jadda-jadda-infra
-ExecStop=/usr/bin/podman stop -t 10 jadda-jadda-infra
-PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameNew := `# jadda-jadda.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman jadda-jadda.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodNameNewDetach := `# jadda-jadda.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman jadda-jadda.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 42
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- goodIDNew := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-# autogenerated by Podman CI
-
-[Unit]
-Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
-Documentation=man:podman-generate-systemd(1)
-Wants=network.target
-After=network-online.target
-
-[Service]
-Environment=PODMAN_SYSTEMD_UNIT=%n
-Restart=always
-ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid
-ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon -d awesome-image:latest
-ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 10
-ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid
-PIDFile=%t/%n-pid
-KillMode=none
-Type=forking
-
-[Install]
-WantedBy=multi-user.target default.target`
-
- tests := []struct {
- name string
- info ContainerInfo
- want string
- wantErr bool
- }{
-
- {"good with id",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- ContainerName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- goodID,
- false,
- },
- {"good with name",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-foobar",
- ContainerName: "foobar",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- goodName,
- false,
- },
- {"good with name and bound to",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-foobar",
- ContainerName: "foobar",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- BoundToServices: []string{"pod", "a", "b", "c"},
- },
- goodNameBoundTo,
- false,
- },
- {"pod",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "pod-123abc",
- ContainerName: "jadda-jadda-infra",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- RequiredServices: []string{"container-1", "container-2"},
- },
- podGoodName,
- false,
- },
- {"bad restart policy",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "never",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- },
- "",
- true,
- },
- {"good with name and generic",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNew,
- false,
- },
- {"good with explicit short detach param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "-d", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNew,
- false,
- },
- {"good with explicit full detach param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "jadda-jadda",
- ContainerName: "jadda-jadda",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 42,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "--detach", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN"},
- },
- goodNameNewDetach,
- false,
- },
- {"good with id and no param",
- ContainerInfo{
- Executable: "/usr/bin/podman",
- ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- ContainerName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401",
- RestartPolicy: "always",
- PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
- StopTimeout: 10,
- PodmanVersion: "CI",
- New: true,
- CreateCommand: []string{"I'll get stripped", "container", "run", "awesome-image:latest"},
- },
- goodIDNew,
- false,
- },
- }
- for _, tt := range tests {
- test := tt
- t.Run(tt.name, func(t *testing.T) {
- opts := Options{
- Files: false,
- New: test.info.New,
- }
- got, err := CreateContainerSystemdUnit(&test.info, opts)
- if (err != nil) != test.wantErr {
- t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, test.wantErr)
- return
- }
- if got != test.want {
- t.Errorf("CreateContainerSystemdUnit() = \n%v\n---------> want\n%v", got, test.want)
- }
- })
- }
-}
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index f40472a7c..b9a1ff83d 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -2,6 +2,7 @@ package integration
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
@@ -221,6 +222,42 @@ var _ = Describe("Podman create", func() {
Expect(match).To(BeTrue())
})
+ It("podman create --pod-id-file", func() {
+ // First, make sure that --pod and --pod-id-file yield an error
+ // if used together.
+ session := podmanTest.Podman([]string{"create", "--pod", "foo", "--pod-id-file", "bar", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudoplh"
+ ctrName := "prancer"
+ podIDFile := tmpDir + "pod-id-file"
+
+ // Now, let's create a pod with --pod-id-file.
+ session = podmanTest.Podman([]string{"pod", "create", "--pod-id-file", podIDFile, "--name", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "inspect", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+ podData := session.InspectPodToJSON()
+
+ // Finally we can create a container with --pod-id-file and do
+ // some checks to make sure it's working as expected.
+ session = podmanTest.Podman([]string{"create", "--pod-id-file", podIDFile, "--name", ctrName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ ctrJSON := podmanTest.InspectContainer(ctrName)
+ Expect(podData.ID).To(Equal(ctrJSON[0].Pod)) // Make sure the container's pod matches the pod's ID
+ })
+
It("podman run entrypoint and cmd test", func() {
name := "test101"
create := podmanTest.Podman([]string{"create", "--name", name, redis})
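The test above exercises the new --pod-id-file flag on podman create: the pod ID written by `podman pod create --pod-id-file` is read back and used in place of a pod name, which is also why combining --pod and --pod-id-file is rejected with exit code 125. A minimal sketch of that resolution step, assuming the flag is resolved by reading the first line of the file (the helper name here is illustrative, not the actual specgen code):

package sketch

import (
	"io/ioutil"
	"strings"
)

// readPodIDFile returns the pod ID stored at path, i.e. the first line
// written by `podman pod create --pod-id-file`.
func readPodIDFile(path string) (string, error) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return strings.Split(strings.TrimSpace(string(content)), "\n")[0], nil
}

The resolved ID can then be used anywhere a pod name or ID is accepted.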
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index db750bfcc..7872a9fbf 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -282,7 +282,8 @@ var _ = Describe("Podman generate kube", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ // container name in pod is <podName>-<ctrName>
+ inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "toppod-test1"})
inspect1.WaitWithDefaultTimeout()
Expect(inspect1.ExitCode()).To(Equal(0))
Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString()))
@@ -295,6 +296,7 @@ var _ = Describe("Podman generate kube", func() {
// we need a container name because IDs don't persist after rm/play
ctrName := "test-ctr"
+ ctrNameInKubePod := "test1-test-ctr"
session1 := podmanTest.Podman([]string{"run", "-d", "--pod", "new:test1", "--name", ctrName, "-v", vol1 + ":/volume/:z", "alpine", "top"})
session1.WaitWithDefaultTimeout()
@@ -313,7 +315,7 @@ var _ = Describe("Podman generate kube", func() {
play.WaitWithDefaultTimeout()
Expect(play.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", ctrName})
+ inspect := podmanTest.Podman([]string{"inspect", ctrNameInKubePod})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(vol1))
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index d5ae441e2..497e8f71e 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -3,6 +3,7 @@
package integration
import (
+ "io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
@@ -191,7 +192,7 @@ var _ = Describe("Podman generate systemd", func() {
found, _ := session.GrepString("# container-foo.service")
Expect(found).To(BeTrue())
- found, _ = session.GrepString("stop --ignore --cidfile %t/%n-cid -t 42")
+ found, _ = session.GrepString("stop --ignore --cidfile %t/container-foo.ctr-id -t 42")
Expect(found).To(BeTrue())
})
@@ -230,7 +231,7 @@ var _ = Describe("Podman generate systemd", func() {
session := podmanTest.Podman([]string{"generate", "systemd", "--time", "42", "--name", "--new", "foo"})
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(125))
+ Expect(session.ExitCode()).To(Equal(0))
})
It("podman generate systemd --container-prefix con", func() {
@@ -325,4 +326,49 @@ var _ = Describe("Podman generate systemd", func() {
found, _ = session.GrepString("BindsTo=p_foo.service")
Expect(found).To(BeTrue())
})
+
+ It("podman generate systemd pod with containers --new", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ n := podmanTest.Podman([]string{"pod", "create", "--pod-id-file", tmpFile, "--name", "foo"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-2", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--new", "--name", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# pod-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("Requires=container-foo-1.service container-foo-2.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("BindsTo=pod-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod create --infra-conmon-pidfile %t/pod-foo.pid --pod-id-file %t/pod-foo.pod-id --name foo")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("ExecStartPre=/usr/bin/rm -f %t/pod-foo.pid %t/pod-foo.pod-id")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod stop --ignore --pod-id-file %t/pod-foo.pod-id -t 10")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("pod rm --ignore -f --pod-id-file %t/pod-foo.pod-id")
+ Expect(found).To(BeTrue())
+ })
})
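Taken together, the strings grepped in the new pod test outline the unit that `generate systemd --new` is expected to emit: the pod is created fresh with --infra-conmon-pidfile and --pod-id-file, and later stopped and removed via the same ID file. A rough sketch of how those fragments could be assembled into Exec* directives; which fragment lands on which directive is an assumption of this sketch, not something the test asserts:

package sketch

import "fmt"

// podUnitExecLines assembles the Exec* directives the test greps for.
// unit is the systemd unit basename (e.g. "pod-foo"), name the pod name.
func podUnitExecLines(podman, unit, name string, timeout uint) []string {
	return []string{
		fmt.Sprintf("ExecStartPre=/usr/bin/rm -f %%t/%s.pid %%t/%s.pod-id", unit, unit),
		fmt.Sprintf("ExecStartPre=%s pod create --infra-conmon-pidfile %%t/%s.pid --pod-id-file %%t/%s.pod-id --name %s", podman, unit, unit, name),
		fmt.Sprintf("ExecStart=%s pod start --pod-id-file %%t/%s.pod-id", podman, unit),
		fmt.Sprintf("ExecStop=%s pod stop --ignore --pod-id-file %%t/%s.pod-id -t %d", podman, unit, timeout),
		fmt.Sprintf("ExecStopPost=%s pod rm --ignore -f --pod-id-file %%t/%s.pod-id", podman, unit),
	}
}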
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 9daf266b8..7fe4ce967 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -3,6 +3,7 @@
package integration
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
@@ -14,7 +15,18 @@ import (
. "github.com/onsi/gomega"
)
-var yamlTemplate = `
+var unknownKindYaml = `
+apiVersion: v1
+kind: UnknownKind
+metadata:
+ labels:
+ app: app1
+ name: unknown
+spec:
+ hostname: unknown
+`
+
+var podYamlTemplate = `
apiVersion: v1
kind: Pod
metadata:
@@ -77,31 +89,137 @@ spec:
status: {}
`
+var deploymentYamlTemplate = `
+apiVersion: v1
+kind: Deployment
+metadata:
+ creationTimestamp: "2019-07-17T14:44:08Z"
+ labels:
+ app: {{ .Name }}
+ name: {{ .Name }}
+{{ with .Annotations }}
+ annotations:
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+{{ end }}
+
+spec:
+ replicas: {{ .Replicas }}
+ selector:
+ matchLabels:
+ app: {{ .Name }}
+ template:
+ {{ with .PodTemplate }}
+ metadata:
+ labels:
+ app: {{ .Name }}
+ {{ with .Annotations }}
+ annotations:
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+ {{ end }}
+ spec:
+ hostname: {{ .Hostname }}
+ containers:
+ {{ with .Ctrs }}
+ {{ range . }}
+ - command:
+ {{ range .Cmd }}
+ - {{.}}
+ {{ end }}
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: HOSTNAME
+ - name: container
+ value: podman
+ image: {{ .Image }}
+ name: {{ .Name }}
+ imagePullPolicy: {{ .PullPolicy }}
+ resources: {}
+ {{ if .SecurityContext }}
+ securityContext:
+ allowPrivilegeEscalation: true
+ {{ if .Caps }}
+ capabilities:
+ {{ with .CapAdd }}
+ add:
+ {{ range . }}
+ - {{.}}
+ {{ end }}
+ {{ end }}
+ {{ with .CapDrop }}
+ drop:
+ {{ range . }}
+ - {{.}}
+ {{ end }}
+ {{ end }}
+ {{ end }}
+ privileged: false
+ readOnlyRootFilesystem: false
+ workingDir: /
+ {{ end }}
+ {{ end }}
+ {{ end }}
+ {{ end }}
+`
+
var (
- defaultCtrName = "testCtr"
- defaultCtrCmd = []string{"top"}
- defaultCtrImage = ALPINE
- defaultPodName = "testPod"
- seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
+ defaultCtrName = "testCtr"
+ defaultCtrCmd = []string{"top"}
+ defaultCtrImage = ALPINE
+ defaultPodName = "testPod"
+ defaultDeploymentName = "testDeployment"
+ seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
)
-func generateKubeYaml(pod *Pod, fileName string) error {
+func writeYaml(content string, fileName string) error {
f, err := os.Create(fileName)
if err != nil {
return err
}
defer f.Close()
- t, err := template.New("pod").Parse(yamlTemplate)
+ _, err = f.WriteString(content)
if err != nil {
return err
}
- if err := t.Execute(f, pod); err != nil {
+ return nil
+}
+
+func generatePodKubeYaml(pod *Pod, fileName string) error {
+ templateBytes := &bytes.Buffer{}
+
+ t, err := template.New("pod").Parse(podYamlTemplate)
+ if err != nil {
return err
}
- return nil
+ if err := t.Execute(templateBytes, pod); err != nil {
+ return err
+ }
+
+ return writeYaml(templateBytes.String(), fileName)
+}
+
+func generateDeploymentKubeYaml(deployment *Deployment, fileName string) error {
+ templateBytes := &bytes.Buffer{}
+
+ t, err := template.New("deployment").Parse(deploymentYamlTemplate)
+ if err != nil {
+ return err
+ }
+
+ if err := t.Execute(templateBytes, deployment); err != nil {
+ return err
+ }
+
+ return writeYaml(templateBytes.String(), fileName)
}
// Pod describes the options a kube yaml can be configured at pod level
@@ -146,6 +264,59 @@ func withAnnotation(k, v string) podOption {
}
}
+// Deployment describes the options a kube yaml can be configured at deployment level
+type Deployment struct {
+ Name string
+ Replicas int32
+ Annotations map[string]string
+ PodTemplate *Pod
+}
+
+func getDeployment(options ...deploymentOption) *Deployment {
+ d := Deployment{defaultDeploymentName, 1, make(map[string]string), getPod()}
+ for _, option := range options {
+ option(&d)
+ }
+
+ return &d
+}
+
+type deploymentOption func(*Deployment)
+
+func withDeploymentAnnotation(k, v string) deploymentOption {
+ return func(deployment *Deployment) {
+ deployment.Annotations[k] = v
+ }
+}
+
+func withPod(pod *Pod) deploymentOption {
+ return func(d *Deployment) {
+ d.PodTemplate = pod
+ }
+}
+
+func withReplicas(replicas int32) deploymentOption {
+ return func(d *Deployment) {
+ d.Replicas = replicas
+ }
+}
+
+// getPodNamesInDeployment returns list of Pod objects
+// with just their name set, so that it can be passed around
+// and into getCtrNameInPod for ease of testing
+func getPodNamesInDeployment(d *Deployment) []Pod {
+ var pods []Pod
+ var i int32
+
+ for i = 0; i < d.Replicas; i++ {
+ p := Pod{}
+ p.Name = fmt.Sprintf("%s-pod-%d", d.Name, i)
+ pods = append(pods, p)
+ }
+
+ return pods
+}
+
// Ctr describes the options a kube yaml can be configured at container level
type Ctr struct {
Name string
@@ -208,6 +379,10 @@ func withPullPolicy(policy string) ctrOption {
}
}
+func getCtrNameInPod(pod *Pod) string {
+ return fmt.Sprintf("%s-%s", pod.Name, defaultCtrName)
+}
+
var _ = Describe("Podman generate kube", func() {
var (
tempdir string
@@ -234,8 +409,18 @@ var _ = Describe("Podman generate kube", func() {
processTestResult(f)
})
+ It("podman play kube fail with yaml of unsupported kind", func() {
+ err := writeYaml(unknownKindYaml, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Not(Equal(0)))
+
+ })
+
It("podman play kube fail with nonexist authfile", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ err := generatePodKubeYaml(getPod(), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", "--authfile", "/tmp/nonexist", kubeYaml})
@@ -245,14 +430,15 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman play kube test correct command", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ pod := getPod()
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
@@ -261,33 +447,34 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct output", func() {
p := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}))))
- err := generateKubeYaml(p, kubeYaml)
+ err := generatePodKubeYaml(p, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(p)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("hello"))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "'{{ .Config.Cmd }}'"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(p), "--format", "'{{ .Config.Cmd }}'"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring("hello"))
})
It("podman play kube test hostname", func() {
- err := generateKubeYaml(getPod(), kubeYaml)
+ pod := getPod()
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "{{ .Config.Hostname }}"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .Config.Hostname }}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(Equal(defaultPodName))
@@ -295,14 +482,15 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test with customized hostname", func() {
hostname := "myhostname"
- err := generateKubeYaml(getPod(withHostname(hostname)), kubeYaml)
+ pod := getPod(withHostname(hostname))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName, "--format", "{{ .Config.Hostname }}"})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .Config.Hostname }}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(Equal(hostname))
@@ -312,14 +500,15 @@ var _ = Describe("Podman generate kube", func() {
capAdd := "CAP_SYS_ADMIN"
ctr := getCtr(withCapAdd([]string{capAdd}), withCmd([]string{"cat", "/proc/self/status"}))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ pod := getPod(withCtr(ctr))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(capAdd))
@@ -329,14 +518,15 @@ var _ = Describe("Podman generate kube", func() {
capDrop := "CAP_CHOWN"
ctr := getCtr(withCapDrop([]string{capDrop}))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ pod := getPod(withCtr(ctr))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(capDrop))
@@ -344,14 +534,15 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube no security context", func() {
// expect play kube to not fail if no security context is specified
- err := generateKubeYaml(getPod(withCtr(getCtr(withSecurityContext(false)))), kubeYaml)
+ pod := getPod(withCtr(getCtr(withSecurityContext(false))))
+ err := generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", defaultCtrName})
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod)})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
})
@@ -367,7 +558,8 @@ var _ = Describe("Podman generate kube", func() {
ctrAnnotation := "container.seccomp.security.alpha.kubernetes.io/" + defaultCtrName
ctr := getCtr(withCmd([]string{"pwd"}))
- err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile))), kubeYaml)
+ pod := getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile)))
+ err = generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -375,7 +567,7 @@ var _ = Describe("Podman generate kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(pod)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
@@ -392,7 +584,8 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}))
- err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile))), kubeYaml)
+ pod := getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile)))
+ err = generatePodKubeYaml(pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -400,7 +593,7 @@ var _ = Describe("Podman generate kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs := podmanTest.Podman([]string{"logs", getCtrNameInPod(pod)})
logs.WaitWithDefaultTimeout()
Expect(logs.ExitCode()).To(Equal(0))
Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
@@ -408,7 +601,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of never should be 125", func() {
ctr := getCtr(withPullPolicy("never"), withImage(BB_GLIBC))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -418,7 +611,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of missing", func() {
ctr := getCtr(withPullPolicy("missing"), withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -444,7 +637,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withPullPolicy("always"), withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -475,7 +668,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withImage(BB))
- err := generateKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -519,7 +712,7 @@ spec:
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", "demo_kube"})
+ inspect := podmanTest.Podman([]string{"inspect", "demo_pod-demo_kube"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
@@ -529,4 +722,41 @@ spec:
Expect(ctr[0].Config.Labels["key1"]).To(ContainSubstring("value1"))
Expect(ctr[0].Config.StopSignal).To(Equal(uint(51)))
})
+
+ // Deployment related tests
+ It("podman play kube deployment 1 replica test correct command", func() {
+ deployment := getDeployment()
+ err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ podNames := getPodNamesInDeployment(deployment)
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(&podNames[0])})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
+ })
+
+ It("podman play kube deployment more than 1 replica test correct command", func() {
+ var i, numReplicas int32
+ numReplicas = 5
+ deployment := getDeployment(withReplicas(numReplicas))
+ err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ podNames := getPodNamesInDeployment(deployment)
+ for i = 0; i < numReplicas; i++ {
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(&podNames[i])})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(defaultCtrCmd[0]))
+ }
+ })
})
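The helpers added above encode the naming scheme these tests rely on: replica N of a deployment becomes a pod named <deploymentName>-pod-<N>, and each container inside a pod is addressed as <podName>-<ctrName>. A tiny illustration of computing the inspectable container name for a given replica, mirroring getPodNamesInDeployment and getCtrNameInPod (a test-side convention, not libpod code):

package sketch

import "fmt"

// ctrNameInDeployment returns the container name the tests expect for
// container ctr of replica n in deployment dep.
func ctrNameInDeployment(dep, ctr string, n int) string {
	podName := fmt.Sprintf("%s-pod-%d", dep, n)
	return fmt.Sprintf("%s-%s", podName, ctr)
}

// Example: ctrNameInDeployment("testDeployment", "testCtr", 0)
// yields "testDeployment-pod-0-testCtr".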
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index 8040adf1e..f1acd3750 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -57,4 +57,26 @@ var _ = Describe("Podman pod inspect", func() {
podData := inspect.InspectPodToJSON()
Expect(podData.ID).To(Equal(podid))
})
+
+ It("podman pod inspect (CreateCommand)", func() {
+ podName := "myTestPod"
+ createCommand := []string{"pod", "create", "--name", podName, "--hostname", "rudolph", "--share", "net"}
+
+ // Create the pod.
+ session := podmanTest.Podman(createCommand)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Inspect the pod and make sure that the create command is
+ // exactly how we created the pod.
+ inspect := podmanTest.Podman([]string{"pod", "inspect", podName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.IsJSONOutputValid()).To(BeTrue())
+ podData := inspect.InspectPodToJSON()
+ // Let's get the last len(createCommand) items in the command.
+ inspectCreateCommand := podData.CreateCommand
+ index := len(inspectCreateCommand) - len(createCommand)
+ Expect(inspectCreateCommand[index:]).To(Equal(createCommand))
+ })
})
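The CreateCommand check above compares only the trailing len(createCommand) arguments because the recorded command presumably also carries the leading podman binary path and any global flags (an assumption here; the test does not inspect those). A minimal sketch of that trailing-slice comparison:

package sketch

// endsWith reports whether recorded ends with the arguments in created;
// this mirrors how the test compares podData.CreateCommand with the
// command it actually ran.
func endsWith(recorded, created []string) bool {
	if len(created) > len(recorded) {
		return false
	}
	tail := recorded[len(recorded)-len(created):]
	for i := range created {
		if tail[i] != created[i] {
			return false
		}
	}
	return true
}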
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index 4060e1268..d0ece7b53 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -2,6 +2,7 @@ package integration
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -229,4 +230,72 @@ var _ = Describe("Podman pod rm", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
+
+ It("podman pod start/remove single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+
+ session = podmanTest.Podman([]string{"pod", "rm", "--pod-id-file", tmpFile, "--force"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman pod start/remove multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+
+ cmd = []string{"pod", "rm", "--force"}
+ cmd = append(cmd, podIDFiles...)
+ session = podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
})
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 8e78cadfd..d7d623d6e 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -1,7 +1,11 @@
package integration
import (
+ "fmt"
+ "io/ioutil"
"os"
+ "strconv"
+ "strings"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
@@ -136,4 +140,94 @@ var _ = Describe("Podman pod start", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman pod start single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+ })
+
+ It("podman pod start multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+ })
+
+ It("podman pod create --infra-conmon-pod create + start", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+ // Create a pod with --infra-conmon-pidfile.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--infra-conmon-pidfile", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", podName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) // infra
+
+ readFirstLine := func(path string) string {
+ content, err := ioutil.ReadFile(path)
+ Expect(err).To(BeNil())
+ return strings.Split(string(content), "\n")[0]
+ }
+
+ // Read the infra-conmon-pidfile and perform some sanity checks
+ // on the pid.
+ infraConmonPID := readFirstLine(tmpFile)
+ _, err = strconv.Atoi(infraConmonPID) // Make sure it's a proper integer
+ Expect(err).To(BeNil())
+
+ cmdline := readFirstLine(fmt.Sprintf("/proc/%s/cmdline", infraConmonPID))
+ Expect(cmdline).To(ContainSubstring("/conmon"))
+ })
+
})
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index 0a46b07c9..0fe580921 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -1,6 +1,7 @@
package integration
import (
+ "io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
@@ -175,4 +176,72 @@ var _ = Describe("Podman pod stop", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman pod start/stop single pod via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "podID"
+ defer os.RemoveAll(tmpDir)
+
+ podName := "rudolph"
+
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "start", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
+
+ session = podmanTest.Podman([]string{"pod", "stop", "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
+
+ It("podman pod start/stop multiple pods via --pod-id-file", func() {
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ defer os.RemoveAll(tmpDir)
+
+ podIDFiles := []string{}
+ for _, i := range "0123456789" {
+ tmpFile := tmpDir + "cid" + string(i)
+ podName := "rudolph" + string(i)
+ // Create a pod with --pod-id-file.
+ session := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--pod-id-file", tmpFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Create container inside the pod.
+ session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Append the id files along with the command.
+ podIDFiles = append(podIDFiles, "--pod-id-file")
+ podIDFiles = append(podIDFiles, tmpFile)
+ }
+
+ cmd := []string{"pod", "start"}
+ cmd = append(cmd, podIDFiles...)
+ session := podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
+
+ cmd = []string{"pod", "stop"}
+ cmd = append(cmd, podIDFiles...)
+ session = podmanTest.Podman(cmd)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ })
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 9db2f5d49..4fad85f00 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -129,6 +129,32 @@ var _ = Describe("Podman run networking", func() {
Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("127.0.0.1"))
})
+ It("podman run -p [::1]:8080:80/udp", func() {
+ name := "testctr"
+ session := podmanTest.Podman([]string{"create", "-t", "-p", "[::1]:8080:80/udp", "--name", name, ALPINE, "/bin/sh"})
+ session.WaitWithDefaultTimeout()
+ inspectOut := podmanTest.InspectContainer(name)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports)).To(Equal(1))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostPort).To(Equal(int32(8080)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].ContainerPort).To(Equal(int32(80)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].Protocol).To(Equal("udp"))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("::1"))
+ })
+
+ It("podman run -p [::1]:8080:80/tcp", func() {
+ name := "testctr"
+ session := podmanTest.Podman([]string{"create", "-t", "-p", "[::1]:8080:80/tcp", "--name", name, ALPINE, "/bin/sh"})
+ session.WaitWithDefaultTimeout()
+ inspectOut := podmanTest.InspectContainer(name)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports)).To(Equal(1))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostPort).To(Equal(int32(8080)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].ContainerPort).To(Equal(int32(80)))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].Protocol).To(Equal("tcp"))
+ Expect(inspectOut[0].NetworkSettings.Ports[0].HostIP).To(Equal("::1"))
+ })
+
It("podman run --expose 80 -P", func() {
name := "testctr"
session := podmanTest.Podman([]string{"create", "-t", "--expose", "80", "-P", "--name", name, ALPINE, "/bin/sh"})
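The two new cases above cover publishing to an IPv6 host address; the host IP must be bracketed so its colons are not confused with the hostPort:containerPort separators. A small stand-alone parser for that syntax, given only as an illustration of the format (not podman's own port-parsing code):

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePublish splits a "-p" value such as "[::1]:8080:80/udp" into its
// parts. The protocol defaults to "tcp" when no "/proto" suffix is given.
func parsePublish(spec string) (hostIP string, hostPort, ctrPort int, proto string, err error) {
	proto = "tcp"
	if idx := strings.LastIndex(spec, "/"); idx != -1 {
		proto = spec[idx+1:]
		spec = spec[:idx]
	}
	// A bracketed host address ([::1]) keeps the IPv6 colons out of the
	// hostPort:ctrPort split below.
	if strings.HasPrefix(spec, "[") {
		end := strings.Index(spec, "]")
		if end == -1 {
			return "", 0, 0, "", fmt.Errorf("unclosed bracket in %q", spec)
		}
		hostIP = spec[1:end]
		spec = strings.TrimPrefix(spec[end+1:], ":")
	}
	parts := strings.Split(spec, ":")
	if len(parts) == 3 && hostIP == "" { // unbracketed IPv4 host address
		hostIP = parts[0]
		parts = parts[1:]
	}
	if len(parts) != 2 {
		return "", 0, 0, "", fmt.Errorf("expected hostPort:ctrPort in %q", spec)
	}
	if hostPort, err = strconv.Atoi(parts[0]); err != nil {
		return "", 0, 0, "", err
	}
	if ctrPort, err = strconv.Atoi(parts[1]); err != nil {
		return "", 0, 0, "", err
	}
	return hostIP, hostPort, ctrPort, proto, nil
}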
diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
new file mode 100644
index 000000000..61dc97bde
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/apps/v1"
diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
new file mode 100644
index 000000000..6ef25f50f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -0,0 +1,8238 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
+func (*ControllerRevision) ProtoMessage() {}
+func (*ControllerRevision) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{0}
+}
+func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRevision) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRevision.Merge(m, src)
+}
+func (m *ControllerRevision) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRevision) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
+
+func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
+func (*ControllerRevisionList) ProtoMessage() {}
+func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{1}
+}
+func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRevisionList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRevisionList.Merge(m, src)
+}
+func (m *ControllerRevisionList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRevisionList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
+
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
+func (*DaemonSet) ProtoMessage() {}
+func (*DaemonSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{2}
+}
+func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSet.Merge(m, src)
+}
+func (m *DaemonSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
+
+func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
+func (*DaemonSetCondition) ProtoMessage() {}
+func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{3}
+}
+func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetCondition.Merge(m, src)
+}
+func (m *DaemonSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
+
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
+func (*DaemonSetList) ProtoMessage() {}
+func (*DaemonSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{4}
+}
+func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetList.Merge(m, src)
+}
+func (m *DaemonSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
+
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
+func (*DaemonSetSpec) ProtoMessage() {}
+func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{5}
+}
+func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetSpec.Merge(m, src)
+}
+func (m *DaemonSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
+
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
+func (*DaemonSetStatus) ProtoMessage() {}
+func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{6}
+}
+func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetStatus.Merge(m, src)
+}
+func (m *DaemonSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
+
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{7}
+}
+func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src)
+}
+func (m *DaemonSetUpdateStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
+
+func (m *Deployment) Reset() { *m = Deployment{} }
+func (*Deployment) ProtoMessage() {}
+func (*Deployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{8}
+}
+func (m *Deployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Deployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Deployment.Merge(m, src)
+}
+func (m *Deployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *Deployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_Deployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Deployment proto.InternalMessageInfo
+
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
+func (*DeploymentCondition) ProtoMessage() {}
+func (*DeploymentCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{9}
+}
+func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentCondition.Merge(m, src)
+}
+func (m *DeploymentCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
+
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
+func (*DeploymentList) ProtoMessage() {}
+func (*DeploymentList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{10}
+}
+func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentList.Merge(m, src)
+}
+func (m *DeploymentList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
+
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
+func (*DeploymentSpec) ProtoMessage() {}
+func (*DeploymentSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{11}
+}
+func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentSpec.Merge(m, src)
+}
+func (m *DeploymentSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
+
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
+func (*DeploymentStatus) ProtoMessage() {}
+func (*DeploymentStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{12}
+}
+func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentStatus.Merge(m, src)
+}
+func (m *DeploymentStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
+
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
+func (*DeploymentStrategy) ProtoMessage() {}
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{13}
+}
+func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentStrategy.Merge(m, src)
+}
+func (m *DeploymentStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
+
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
+func (*ReplicaSet) ProtoMessage() {}
+func (*ReplicaSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{14}
+}
+func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSet.Merge(m, src)
+}
+func (m *ReplicaSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
+
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
+func (*ReplicaSetCondition) ProtoMessage() {}
+func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{15}
+}
+func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetCondition.Merge(m, src)
+}
+func (m *ReplicaSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
+
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
+func (*ReplicaSetList) ProtoMessage() {}
+func (*ReplicaSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{16}
+}
+func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetList.Merge(m, src)
+}
+func (m *ReplicaSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
+
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
+func (*ReplicaSetSpec) ProtoMessage() {}
+func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{17}
+}
+func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetSpec.Merge(m, src)
+}
+func (m *ReplicaSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
+
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
+func (*ReplicaSetStatus) ProtoMessage() {}
+func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{18}
+}
+func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReplicaSetStatus.Merge(m, src)
+}
+func (m *ReplicaSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
+
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{19}
+}
+func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src)
+}
+func (m *RollingUpdateDaemonSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
+
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
+func (*RollingUpdateDeployment) ProtoMessage() {}
+func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{20}
+}
+func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
+}
+func (m *RollingUpdateDeployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{21}
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src)
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
+
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
+func (*StatefulSet) ProtoMessage() {}
+func (*StatefulSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{22}
+}
+func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSet.Merge(m, src)
+}
+func (m *StatefulSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
+
+func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
+func (*StatefulSetCondition) ProtoMessage() {}
+func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{23}
+}
+func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetCondition.Merge(m, src)
+}
+func (m *StatefulSetCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
+
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
+func (*StatefulSetList) ProtoMessage() {}
+func (*StatefulSetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{24}
+}
+func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetList.Merge(m, src)
+}
+func (m *StatefulSetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
+
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
+func (*StatefulSetSpec) ProtoMessage() {}
+func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{25}
+}
+func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetSpec.Merge(m, src)
+}
+func (m *StatefulSetSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
+
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
+func (*StatefulSetStatus) ProtoMessage() {}
+func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{26}
+}
+func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetStatus.Merge(m, src)
+}
+func (m *StatefulSetStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
+
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
+func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e1014cab6f31e43b, []int{27}
+}
+func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src)
+}
+func (m *StatefulSetUpdateStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision")
+ proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList")
+ proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet")
+ proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition")
+ proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList")
+ proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec")
+ proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus")
+ proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy")
+ proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment")
+ proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition")
+ proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList")
+ proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec")
+ proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus")
+ proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy")
+ proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet")
+ proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition")
+ proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList")
+ proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec")
+ proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus")
+ proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet")
+ proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment")
+ proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy")
+ proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet")
+ proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition")
+ proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList")
+ proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec")
+ proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus")
+ proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b)
+}
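
The two init functions above register every apps/v1 message type and the gzipped file descriptor with the gogo/protobuf registry, so a caller can resolve the Go type behind a fully qualified proto name at runtime. A minimal illustrative sketch of that lookup (not part of the vendored file; it assumes the usual github.com/gogo/protobuf and k8s.io/api import paths are available):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	// Blank import runs the init() registration shown in the generated file above.
	_ "k8s.io/api/apps/v1"
)

func main() {
	// Resolve the registered Go type for a fully qualified proto message name.
	t := proto.MessageType("k8s.io.api.apps.v1.Deployment")
	fmt.Println(t) // *v1.Deployment
}
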
+
+var fileDescriptor_e1014cab6f31e43b = []byte{
+ // 2031 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47,
+ 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19,
+ 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78,
+ 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea,
+ 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65,
+ 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea,
+ 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55,
+ 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f,
+ 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d,
+ 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36,
+ 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a,
+ 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88,
+ 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7,
+ 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35,
+ 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02,
+ 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37,
+ 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d,
+ 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c,
+ 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7,
+ 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11,
+ 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e,
+ 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e,
+ 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19,
+ 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16,
+ 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8,
+ 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa,
+ 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd,
+ 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc,
+ 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1,
+ 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1,
+ 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3,
+ 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6,
+ 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d,
+ 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7,
+ 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1,
+ 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92,
+ 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39,
+ 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2,
+ 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57,
+ 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34,
+ 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b,
+ 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9,
+ 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a,
+ 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95,
+ 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68,
+ 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f,
+ 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13,
+ 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda,
+ 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f,
+ 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6,
+ 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60,
+ 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf,
+ 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe,
+ 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39,
+ 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e,
+ 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22,
+ 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b,
+ 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5,
+ 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5,
+ 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9,
+ 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c,
+ 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78,
+ 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06,
+ 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0,
+ 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa,
+ 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6,
+ 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93,
+ 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99,
+ 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96,
+ 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7,
+ 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef,
+ 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed,
+ 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2,
+ 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e,
+ 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97,
+ 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93,
+ 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f,
+ 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d,
+ 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80,
+ 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40,
+ 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e,
+ 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25,
+ 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7,
+ 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9,
+ 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92,
+ 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b,
+ 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f,
+ 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f,
+ 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73,
+ 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2,
+ 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31,
+ 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89,
+ 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae,
+ 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26,
+ 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e,
+ 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab,
+ 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2,
+ 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5,
+ 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb,
+ 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a,
+ 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49,
+ 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20,
+ 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54,
+ 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd,
+ 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39,
+ 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60,
+ 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5,
+ 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91,
+ 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39,
+ 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8,
+ 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24,
+ 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a,
+ 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6,
+ 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b,
+ 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb,
+ 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed,
+ 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf,
+ 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74,
+ 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11,
+ 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c,
+ 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36,
+ 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce,
+ 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e,
+ 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36,
+ 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d,
+ 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0,
+ 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00,
+}
+
+func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x30
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x48
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable))
+ i--
+ dAtA[i] = 0x40
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable))
+ i--
+ dAtA[i] = 0x38
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled))
+ i--
+ dAtA[i] = 0x30
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Deployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Deployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProgressDeadlineSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds))
+ i--
+ dAtA[i] = 0x48
+ }
+ i--
+ if m.Paused {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x30
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x28
+ {
+ size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x40
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x38
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
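
The generated Marshal/MarshalToSizedBuffer pairs above size the buffer first and then write fields back-to-front; the matching Unmarshal methods (called via XXX_Unmarshal) appear later in the generated file. A minimal round-trip sketch using ReplicaSetSpec, illustrative only and assuming the vendored k8s.io/api/apps/v1 package:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	replicas := int32(3)
	spec := appsv1.ReplicaSetSpec{Replicas: &replicas, MinReadySeconds: 10}

	// Marshal allocates a buffer of spec.Size() bytes and fills it back-to-front.
	data, err := spec.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded appsv1.ReplicaSetSpec
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(*decoded.Replicas, decoded.MinReadySeconds) // 3 10
}
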
+
+func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxSurge != nil {
+ {
+ size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Partition != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x40
+ }
+ {
+ size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.PodManagementPolicy)
+ copy(dAtA[i:], m.PodManagementPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.ServiceName)
+ copy(dAtA[i:], m.ServiceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.VolumeClaimTemplates) > 0 {
+ for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Replicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if m.CollisionCount != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
+ i--
+ dAtA[i] = 0x48
+ }
+ i -= len(m.UpdateRevision)
+ copy(dAtA[i:], m.UpdateRevision)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.CurrentRevision)
+ copy(dAtA[i:], m.CurrentRevision)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision)))
+ i--
+ dAtA[i] = 0x32
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RollingUpdate != nil {
+ {
+ size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
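
encodeVarintGenerated steps the offset back by the encoded width and then emits a standard base-128 varint, which is why every MarshalToSizedBuffer above decrements i before writing a tag or length. A standalone sketch of the same helper (illustrative names, not part of the generated file):

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovGenerated: the number of bytes in the varint encoding of v.
func sov(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// encodeVarint mirrors encodeVarintGenerated: step back sov(v) bytes, then write the varint.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	i = encodeVarint(buf, i, 300)
	fmt.Printf("% x\n", buf[i:]) // ac 02
}
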
+func (m *ControllerRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Revision))
+ return n
+}
+
+func (m *ControllerRevisionList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *DaemonSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberMisscheduled))
+ n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberReady))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberAvailable))
+ n += 1 + sovGenerated(uint64(m.NumberUnavailable))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetUpdateStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Deployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ n += 2
+ if m.ProgressDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds))
+ }
+ return n
+}
+
+func (m *DeploymentStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ReplicaSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicaSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ return n
+}
+
+func (m *ReplicaSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RollingUpdateDaemonSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateDeployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingUpdateStatefulSetStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Partition != nil {
+ n += 1 + sovGenerated(uint64(*m.Partition))
+ }
+ return n
+}
+
+func (m *StatefulSet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatefulSetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatefulSetSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, e := range m.VolumeClaimTemplates {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ServiceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodManagementPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UpdateStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ return n
+}
+
+func (m *StatefulSetStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ n += 1 + sovGenerated(uint64(m.CurrentReplicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ l = len(m.CurrentRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UpdateRevision)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CollisionCount != nil {
+ n += 1 + sovGenerated(uint64(*m.CollisionCount))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatefulSetUpdateStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ControllerRevision) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerRevision{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRevisionList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerRevision{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerRevisionList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]DaemonSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DaemonSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetSpec{`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]DaemonSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DaemonSetStatus{`,
+ `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`,
+ `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`,
+ `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`,
+ `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`,
+ `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`,
+ `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DaemonSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DaemonSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Deployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Deployment{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Deployment{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeploymentList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`,
+ `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]DeploymentCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DeploymentStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ReplicaSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ReplicaSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReplicaSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReplicaSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]ReplicaSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ReplicaSetStatus{`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDaemonSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDaemonSet{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateDeployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateDeployment{`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingUpdateStatefulSetStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`,
+ `Partition:` + valueToStringGenerated(this.Partition) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]StatefulSet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&StatefulSetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{"
+ for _, f := range this.VolumeClaimTemplates {
+ repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForVolumeClaimTemplates += "}"
+ s := strings.Join([]string{`&StatefulSetSpec{`,
+ `Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`,
+ `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
+ `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
+ `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]StatefulSetCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&StatefulSetStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`,
+ `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
+ `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StatefulSetUpdateStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StatefulSetUpdateStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ControllerRevision) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerRevision{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DaemonSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType)
+ }
+ m.CurrentNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType)
+ }
+ m.NumberMisscheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberMisscheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType)
+ }
+ m.DesiredNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DesiredNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType)
+ }
+ m.NumberReady = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberReady |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType)
+ }
+ m.UpdatedNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedNumberScheduled |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType)
+ }
+ m.NumberAvailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberAvailable |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType)
+ }
+ m.NumberUnavailable = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NumberUnavailable |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DaemonSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDaemonSet{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Deployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Deployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Deployment{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ProgressDeadlineSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType)
+ }
+ m.UnavailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnavailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DeploymentCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDeployment{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ReplicaSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+ }
+ m.FullyLabeledReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.FullyLabeledReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, ReplicaSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Partition = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, StatefulSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{})
+ if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType)
+ }
+ m.CurrentReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CurrentReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CurrentRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UpdateRevision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CollisionCount = &v
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, StatefulSetCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateStatefulSetStrategy{}
+ }
+ if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
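
The Unmarshal methods above all inline the same protobuf varint loop: seven payload bits per byte, least-significant group first, with the high bit as the continuation flag, plus overflow and EOF checks on every iteration. A minimal standalone sketch of that loop, assuming a hypothetical helper name decodeUvarint that is not part of the vendored file:

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the inlined loop used by the generated Unmarshal
// methods: accumulate 7 bits per byte until a byte with the high bit clear.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // analogous to ErrIntOverflowGenerated
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // analogous to io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C (44) plus 0x02<<7 (256).
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
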
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
new file mode 100644
index 000000000..6c5527974
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -0,0 +1,701 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+message ControllerRevision {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Data is the serialized representation of the state.
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+
+ // Revision indicates the revision of the state represented by Data.
+ optional int64 revision = 3;
+}
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+message ControllerRevisionList {
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerRevisions
+ repeated ControllerRevision items = 2;
+}
+
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetSpec spec = 2;
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+message DaemonSetCondition {
+ // Type of DaemonSet condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // A list of daemon sets.
+ repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+  // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 currentNumberScheduled = 1;
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 numberMisscheduled = 2;
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ optional int32 desiredNumberScheduled = 3;
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ optional int32 numberReady = 4;
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ optional int64 observedGeneration = 5;
+
+ // The total number of nodes that are running updated daemon pod
+ // +optional
+ optional int32 updatedNumberScheduled = 6;
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberAvailable = 7;
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ optional int32 numberUnavailable = 8;
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ optional int32 collisionCount = 9;
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DaemonSetCondition conditions = 10;
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+message DaemonSetUpdateStrategy {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ optional DeploymentSpec spec = 2;
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+ // Type of deployment condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+ // The reason for the condition's last transition.
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+ // Standard list metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Deployments.
+ repeated Deployment items = 2;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ // +optional
+ optional int32 replicas = 1;
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template describes the pods that will be created.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ // +patchStrategy=retainKeys
+ optional DeploymentStrategy strategy = 4;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 5;
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ optional int32 revisionHistoryLimit = 6;
+
+ // Indicates that the deployment is paused.
+ // +optional
+ optional bool paused = 7;
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+ // The generation observed by the deployment controller.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ optional int32 replicas = 2;
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ optional int32 updatedReplicas = 3;
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ optional int32 readyReplicas = 7;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ optional int32 availableReplicas = 4;
+
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ // +optional
+ optional int32 unavailableReplicas = 5;
+
+ // Represents the latest available observations of a deployment's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DeploymentCondition conditions = 6;
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+message ReplicaSet {
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetSpec spec = 2;
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+ // Type of replica set condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ optional int32 replicas = 1;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ReplicaSetCondition conditions = 6;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods does not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ optional int32 partition = 1;
+}
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ optional StatefulSetSpec spec = 2;
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ optional StatefulSetStatus status = 3;
+}
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+message StatefulSetCondition {
+ // Type of statefulset condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated StatefulSet items = 2;
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ optional int32 replicas = 1;
+
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ optional string serviceName = 5;
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ optional string podManagementPolicy = 6;
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ optional int32 revisionHistoryLimit = 8;
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ optional int32 replicas = 2;
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ optional int32 readyReplicas = 3;
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ optional int32 currentReplicas = 4;
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ optional int32 updatedReplicas = 5;
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ optional string currentRevision = 6;
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ optional string updateRevision = 7;
+
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ // +optional
+ optional int32 collisionCount = 9;
+
+ // Represents the latest available observations of a statefulset's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated StatefulSetCondition conditions = 10;
+}
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ // +optional
+ optional string type = 1;
+
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ // +optional
+ optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
+
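The RollingUpdateDeployment comments above describe how percentage values are resolved against the desired replica count: maxUnavailable is rounded down and maxSurge is rounded up. A rough sketch of that arithmetic, assuming a hypothetical resolvePercent helper (the real conversion is performed by the intstr utilities imported at the top of this proto file):

package main

import (
	"fmt"
	"math"
)

// resolvePercent turns a percentage into an absolute pod count for a given
// number of desired replicas, rounding up or down as the field requires.
func resolvePercent(percent, desired int, roundUp bool) int {
	v := float64(percent) * float64(desired) / 100.0
	if roundUp {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}

func main() {
	desired := 10
	// With the documented defaults of 25% on a 10-replica Deployment:
	fmt.Println("maxUnavailable:", resolvePercent(25, desired, false)) // 2 (rounded down)
	fmt.Println("maxSurge:", resolvePercent(25, desired, true))        // 3 (rounded up)
}
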
diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go
new file mode 100644
index 000000000..027101046
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/register.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Deployment{},
+ &DeploymentList{},
+ &StatefulSet{},
+ &StatefulSetList{},
+ &DaemonSet{},
+ &DaemonSetList{},
+ &ReplicaSet{},
+ &ReplicaSetList{},
+ &ControllerRevision{},
+ &ControllerRevisionList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
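
register.go exposes the kinds above through SchemeBuilder and AddToScheme so callers can register them into a runtime.Scheme. As a hedged illustration of what that enables (not necessarily how Podman itself consumes this package), a scheme-backed codec can decode an apps/v1 manifest into the typed structs:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	// Register the apps/v1 kinds, then decode a Deployment manifest.
	scheme := runtime.NewScheme()
	if err := appsv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	manifest := []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
spec:
  replicas: 1
`)

	codecs := serializer.NewCodecFactory(scheme)
	obj, gvk, err := codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	dep := obj.(*appsv1.Deployment)
	fmt.Println(gvk.Kind, dep.Name, *dep.Spec.Replicas) // Deployment demo 1
}
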
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
new file mode 100644
index 000000000..e003a0c4f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -0,0 +1,826 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+ ControllerRevisionHashLabelKey = "controller-revision-hash"
+ StatefulSetRevisionLabel = ControllerRevisionHashLabelKey
+ DeprecatedRollbackTo = "deprecated.deployment.rollback.to"
+ DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
+ StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"
+)
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+ // OrderedReadyPodManagement will create pods in strictly increasing order on
+ // scale up and strictly decreasing order on scale down, progressing only when
+ // the previous pod is ready or terminated. At most one pod will be changed
+ // at any time.
+ OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+ // ParallelPodManagement will create and delete pods as soon as the stateful set
+ // replica count is changed, and will not wait for pods to be ready or complete
+ // termination.
+ ParallelPodManagement PodManagementPolicyType = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ // +optional
+ Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ // +optional
+ RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+ // RollingUpdateStatefulSetStrategyType indicates that update will be
+ // applied to all Pods in the StatefulSet with respect to the StatefulSet
+ // ordering constraints. When a scale operation is performed with this
+ // strategy, new Pods will be created from the specification version indicated
+ // by the StatefulSet's updateRevision.
+ RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"
+ // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+ // tracking and ordered rolling restarts are disabled. Pods are recreated
+ // from the StatefulSetSpec when they are manually deleted. When a scale
+	// operation is performed with this strategy, new Pods will be created
+	// from the specification version indicated by the StatefulSet's currentRevision.
+ OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
+
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+ // Represents the latest available observations of a statefulset's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type StatefulSetConditionType string
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+ // Type of statefulset condition.
+ Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template describes the pods that will be created.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ // +patchStrategy=retainKeys
+ Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+ // Indicates that the deployment is paused.
+ // +optional
+ Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing ReplicaSets (and the label key that is added to their pods) to prevent the existing
+ // ReplicaSets from selecting new pods (and old pods from being selected by the new ReplicaSet).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+ // Replace the old ReplicaSets with a new one using a rolling update, i.e. gradually scale down the old ReplicaSets and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods does not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
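// Illustrative sketch (hypothetical helper, not part of the vendored file):
// how the two rolling-update knobs above might be populated from client code,
// using the intstr helpers this package already imports. A "25%" maxUnavailable
// is converted to an absolute number by rounding down; an absolute maxSurge
// needs no conversion.
func exampleRollingUpdateStrategy() DeploymentStrategy {
	maxUnavailable := intstr.FromString("25%") // percentage of desired pods, rounded down
	maxSurge := intstr.FromInt(2)              // absolute number of extra pods allowed
	return DeploymentStrategy{
		Type: RollingUpdateDeploymentStrategyType,
		RollingUpdate: &RollingUpdateDeployment{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge,
		},
	}
}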
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ // +optional
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+ // Represents the latest available observations of a deployment's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+ // Available means the deployment is available, i.e. at least the minimum available
+ // replicas required are up and running for at least minReadySeconds.
+ DeploymentAvailable DeploymentConditionType = "Available"
+ // Progressing means the deployment is progressing. Progress for a deployment is
+ // considered when a new replica set is created or adopted, and when new pods scale
+ // up or old pods scale down. Progress is not estimated for paused deployments or
+ // when progressDeadlineSeconds is not specified.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // ReplicaFailure is added in a deployment when one of its pods fails to be created
+ // or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Deployments.
+ Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ // +optional
+ Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons with new ones using a rolling update, i.e. replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+ // Replace the old daemons only when they are killed.
+ OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+}
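// Illustrative sketch (hypothetical helper, not part of the vendored file):
// a DaemonSet update strategy that keeps at most 10% of daemon pods down at
// any time. Percentages here round up, so on a 25-node cluster 10% permits
// at most 3 daemon pods to be unavailable during the update.
func exampleDaemonSetUpdateStrategy() DaemonSetUpdateStrategy {
	maxUnavailable := intstr.FromString("10%")
	return DaemonSetUpdateStrategy{
		Type:          RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
	}
}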
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its containers crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+ // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+ // The total number of nodes that are running an updated daemon pod
+ // +optional
+ UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+ // Type of DaemonSet condition.
+ Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+ // DefaultDaemonSetUniqueLabelKey is the default label key that is added
+ // to existing DaemonSet pods to distinguish between old and new
+ // DaemonSet pods during DaemonSet template updates.
+ DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // A list of daemon sets.
+ Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+type ReplicaSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+ // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+ // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
+ // due to kubelet being down or finalizers are failing.
+ ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+ // Type of replica set condition.
+ Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+type ControllerRevision struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Data is the serialized representation of the state.
+ Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+
+ // Revision indicates the revision of the state represented by Data.
+ Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ControllerRevisions
+ Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
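For reference, a minimal sketch of building a Deployment with the vendored types above; the "demo" name, labels, and nginx image are placeholders, and the selector deliberately matches the template labels, as the DeploymentSpec comment requires.

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	labels := map[string]string{"app": "demo"}
	deploy := appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: appsv1.DeploymentSpec{
			Replicas: int32Ptr(3),                                // nil would default to 1
			Selector: &metav1.LabelSelector{MatchLabels: labels}, // must match template labels
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "demo", Image: "docker.io/library/nginx:latest"}},
				},
			},
		},
	}
	_ = deploy
}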
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
new file mode 100644
index 000000000..3f0299d03
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -0,0 +1,365 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ControllerRevision = map[string]string{
+ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "data": "Data is the serialized representation of the state.",
+ "revision": "Revision indicates the revision of the state represented by Data.",
+}
+
+func (ControllerRevision) SwaggerDoc() map[string]string {
+ return map_ControllerRevision
+}
+
+var map_ControllerRevisionList = map[string]string{
+ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
+ "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of ControllerRevisions",
+}
+
+func (ControllerRevisionList) SwaggerDoc() map[string]string {
+ return map_ControllerRevisionList
+}
+
+var map_DaemonSet = map[string]string{
+ "": "DaemonSet represents the configuration of a daemon set.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+ return map_DaemonSet
+}
+
+var map_DaemonSetCondition = map[string]string{
+ "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+ "type": "Type of DaemonSet condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DaemonSetCondition) SwaggerDoc() map[string]string {
+ return map_DaemonSetCondition
+}
+
+var map_DaemonSetList = map[string]string{
+ "": "DaemonSetList is a collection of daemon sets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+ return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+ "": "DaemonSetSpec is the specification of a daemon set.",
+ "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.",
+ "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+ "revisionHistoryLimit": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+ return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+ "": "DaemonSetStatus represents the current status of a daemon set.",
+ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+ "observedGeneration": "The most recent generation observed by the daemon set controller.",
+ "updatedNumberScheduled": "The total number of nodes that are running an updated daemon pod",
+ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "conditions": "Represents the latest available observations of a DaemonSet's current state.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+ return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+ "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
+ "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+ "": "Deployment enables declarative updates for Pods and ReplicaSets.",
+ "metadata": "Standard object metadata.",
+ "spec": "Specification of the desired behavior of the Deployment.",
+ "status": "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+ return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+ "": "DeploymentCondition describes the state of a deployment at a certain point.",
+ "type": "Type of deployment condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+ return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+ "": "DeploymentList is a list of Deployments.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+ return map_DeploymentList
+}
+
+var map_DeploymentSpec = map[string]string{
+ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+ "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+ "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.",
+ "template": "Template describes the pods that will be created.",
+ "strategy": "The deployment strategy to use to replace existing pods with new ones.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+ "paused": "Indicates that the deployment is paused.",
+ "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+ "": "DeploymentStatus is the most recently observed status of the Deployment.",
+ "observedGeneration": "The generation observed by the deployment controller.",
+ "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of ready pods targeted by this deployment.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "conditions": "Represents the latest available observations of a deployment's current state.",
+ "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+ return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "DeploymentStrategy describes how to replace existing pods with new ones.",
+ "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_ReplicaSet = map[string]string{
+ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+ "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+ return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+ "": "ReplicaSetCondition describes the state of a replica set at a certain point.",
+ "type": "Type of replica set condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+ return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+ "": "ReplicaSetList is a collection of ReplicaSets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+ return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+ "": "ReplicaSetSpec is the specification of a ReplicaSet.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+ return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+ "": "ReplicaSetStatus represents the current status of a ReplicaSet.",
+ "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of ready replicas for this replica set.",
+ "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+ "conditions": "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+ return map_ReplicaSetStatus
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+ "": "Spec to control the desired behavior of daemon set rolling update.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+ "": "Spec to control the desired behavior of rolling update.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+ "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods does not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+ "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+ return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_StatefulSet = map[string]string{
+ "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "spec": "Spec defines the desired identities of pods in this set.",
+ "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+ return map_StatefulSet
+}
+
+var map_StatefulSetCondition = map[string]string{
+ "": "StatefulSetCondition describes the state of a statefulset at a certain point.",
+ "type": "Type of statefulset condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (StatefulSetCondition) SwaggerDoc() map[string]string {
+ return map_StatefulSetCondition
+}
+
+var map_StatefulSetList = map[string]string{
+ "": "StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+ return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+ "": "A StatefulSetSpec is the specification of a StatefulSet.",
+ "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+ "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+ "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+ "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+ return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+ "": "StatefulSetStatus represents the current state of a StatefulSet.",
+ "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+ "replicas": "replicas is the number of Pods created by the StatefulSet controller.",
+ "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+ "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+ "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+ "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "conditions": "Represents the latest available observations of a statefulset's current state.",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+ return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+ "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+ "type": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
+ "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+ return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
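These generated maps are keyed by JSON field name, with the empty-string key describing the type itself. A short sketch of reading them, assuming only the package import path shown in this diff:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	docs := (appsv1.Deployment{}).SwaggerDoc()
	fmt.Println(docs[""])     // description of the Deployment type itself
	fmt.Println(docs["spec"]) // description of the "spec" field
}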
diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..7b7ff385c
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
@@ -0,0 +1,772 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRevision)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerRevision, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
+func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRevisionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DaemonSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+ *out = *in
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DaemonSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDaemonSet)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DaemonSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+ if in == nil {
+ return nil
+ }
+ out := new(Deployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Deployment, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ in.Strategy.DeepCopyInto(&out.Strategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ProgressDeadlineSeconds != nil {
+ in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DeploymentCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ReplicaSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ReplicaSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicaSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDaemonSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+ *out = *in
+ if in.Partition != nil {
+ in, out := &in.Partition, &out.Partition
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdateStatefulSetStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StatefulSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Template.DeepCopyInto(&out.Template)
+ if in.VolumeClaimTemplates != nil {
+ in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]corev1.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+ *out = *in
+ if in.CollisionCount != nil {
+ in, out := &in.CollisionCount, &out.CollisionCount
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]StatefulSetCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateStatefulSetStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(StatefulSetUpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
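
[editor's note] The deepcopy functions above allocate fresh memory for pointer fields and slices, so a copy can be mutated without leaking changes back into the original. A minimal usage sketch (not part of this commit), assuming only the vendored k8s.io/api/apps/v1 types:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
    )

    func main() {
        replicas := int32(3)
        orig := &appsv1.Deployment{}
        orig.Spec.Replicas = &replicas

        // DeepCopyInto allocates a new *int32 for Replicas, so the copy
        // is fully independent of the original object.
        copied := orig.DeepCopy()
        *copied.Spec.Replicas = 5

        fmt.Println(*orig.Spec.Replicas, *copied.Spec.Replicas) // 3 5
    }
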
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 73a6e2595..a44d0e88d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -689,6 +689,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
gopkg.in/yaml.v3
# k8s.io/api v0.18.3
+k8s.io/api/apps/v1
k8s.io/api/core/v1
# k8s.io/apimachinery v0.18.3
k8s.io/apimachinery/pkg/api/errors