author    Valentin Rothberg <vrothberg@redhat.com>  2022-05-05 13:34:01 +0200
committer Valentin Rothberg <vrothberg@redhat.com>  2022-05-12 10:51:13 +0200
commit    840c120c21124de921a7f57435cf0d0497103736 (patch)
tree      18b6d18b88ff178474487bd59e0d4275c1b27ea2
parent    ecf0177a01535b273a62e12577d7caf062a91117 (diff)
play kube: service container
Add the notion of a "service container" to play kube. A service container
is started before the pods in play kube and is (reverse) linked to them.
The service container is stopped/removed *after* all pods it is associated
with are stopped/removed. In other words, a service container tracks the
entire life cycle of a service started via `podman play kube`. This is
required to enable `play kube` in a systemd unit file.

The service container is only used when the `--service-container` flag is
set on the CLI. This flag has been marked as hidden as it is not meant to
be used outside the context of `play kube`. It is further not supported on
the remote client.

The wiring with systemd will be done in a later commit.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
-rw-r--r--  cmd/podman/play/kube.go              |   9
-rw-r--r--  libpod/container.go                  |   8
-rw-r--r--  libpod/container_config.go           |   3
-rw-r--r--  libpod/container_inspect.go          |   1
-rw-r--r--  libpod/container_validate.go         |   8
-rw-r--r--  libpod/define/container_inspect.go   |   1
-rw-r--r--  libpod/options.go                    |  38
-rw-r--r--  libpod/pod.go                        |   7
-rw-r--r--  libpod/pod_api.go                    |  22
-rw-r--r--  libpod/runtime_img.go                |   2
-rw-r--r--  libpod/runtime_pod_linux.go          |   4
-rw-r--r--  libpod/service.go                    | 213
-rw-r--r--  pkg/domain/entities/play.go          |   2
-rw-r--r--  pkg/domain/infra/abi/play.go         |  82
-rw-r--r--  pkg/specgen/generate/pause_image.go  |  89
-rw-r--r--  pkg/specgen/generate/pod_create.go   | 104
-rw-r--r--  pkg/specgen/podspecgen.go            |   3
-rw-r--r--  test/system/200-pod.bats             |  13
-rw-r--r--  test/system/700-play.bats            |  55
-rw-r--r--  test/system/helpers.bash             |  13
20 files changed, 564 insertions, 113 deletions
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 5fe059139..f5b121009 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -139,6 +139,15 @@ func init() {
flags.StringVar(&kubeOptions.ContextDir, contextDirFlagName, "", "Path to top level of context directory")
_ = kubeCmd.RegisterFlagCompletionFunc(contextDirFlagName, completion.AutocompleteDefault)
+ // NOTE: The service-container flag is marked as hidden as it
+ // is purely designed for running play-kube in systemd units.
+ // It is not something users should need to know or care about.
+ //
+ // Having a flag rather than an env variable is cleaner.
+ serviceFlagName := "service-container"
+ flags.BoolVar(&kubeOptions.ServiceContainer, serviceFlagName, false, "Starts a service container before all pods")
+ _ = flags.MarkHidden("service-container")
+
flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
_ = flags.MarkHidden("signature-policy")
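A hidden pflag flag is parsed like any other flag but omitted from the --help output. A minimal standalone sketch of the same registration pattern (simplified; not the actual podman wiring):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var serviceContainer bool
	flags := pflag.NewFlagSet("kube", pflag.ContinueOnError)
	flags.BoolVar(&serviceContainer, "service-container", false, "Starts a service container before all pods")
	// Hidden flags still parse; they are just dropped from the help text.
	_ = flags.MarkHidden("service-container")
	_ = flags.Parse([]string{"--service-container=true"})
	fmt.Println(serviceContainer) // true
}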
diff --git a/libpod/container.go b/libpod/container.go
index d7af9a100..64b4453fb 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -211,6 +211,14 @@ type ContainerState struct {
// network and an interface names
NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
+ // Service indicates that the container is the service container of a
+ // service. A service consists of one or more pods. The service
+ // container is started before all pods and is stopped when the last
+ // pod stops. The service container allows for tracking and managing
+ // the entire life cycle of a service, which may be started via
+ // `podman-play-kube`.
+ Service Service
+
// containerPlatformState holds platform-specific container state.
containerPlatformState
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 371a1dec0..3e85ad4d5 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -382,6 +382,9 @@ type ContainerMiscConfig struct {
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
+ // IsService is a bool indicating whether this container is a service container used for
+ // tracking the life cycle of a K8s service.
+ IsService bool `json:"isService"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
// Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 76e0e9e13..5d809644d 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -171,6 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
Mounts: inspectMounts,
Dependencies: c.Dependencies(),
IsInfra: c.IsInfra(),
+ IsService: c.isService(),
}
if c.state.ConfigPath != "" {
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index c6c9a4c6d..d939c94e6 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -1,6 +1,8 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -27,6 +29,12 @@ func (c *Container) validate() error {
return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
}
+ // A container cannot be marked as an infra and service container at
+ // the same time.
+ if c.IsInfra() && c.isService() {
+ return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
+ }
+
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {
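The new check wraps define.ErrInvalidArg with the %w verb, whereas the surrounding file still uses pkg/errors. A self-contained illustration, with a stand-in sentinel error, showing that the wrapped error remains matchable via errors.Is:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for define.ErrInvalidArg; the real sentinel lives in libpod/define.
var errInvalidArg = errors.New("invalid argument")

func validate(isInfra, isService bool) error {
	if isInfra && isService {
		// %w wraps the sentinel so callers can still match it.
		return fmt.Errorf("cannot be infra and service container at the same time: %w", errInvalidArg)
	}
	return nil
}

func main() {
	err := validate(true, true)
	fmt.Println(errors.Is(err, errInvalidArg)) // true
}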
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 6cdffb8b7..e7b82d654 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -683,6 +683,7 @@ type InspectContainerData struct {
NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"`
Namespace string `json:"Namespace"`
IsInfra bool `json:"IsInfra"`
+ IsService bool `json:"IsService"`
Config *InspectContainerConfig `json:"Config"`
HostConfig *InspectContainerHostConfig `json:"HostConfig"`
}
diff --git a/libpod/options.go b/libpod/options.go
index 9b83cb76a..feb89510f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net"
"os"
"path/filepath"
@@ -1477,7 +1478,7 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
}
}
-// withIsInfra allows us to dfferentiate between infra containers and regular containers
+// withIsInfra allows us to differentiate between infra containers and other containers
// within the container config
func withIsInfra() CtrCreateOption {
return func(ctr *Container) error {
@@ -1491,6 +1492,20 @@ func withIsInfra() CtrCreateOption {
}
}
+// WithIsService allows us to differentiate between service containers and other containers
+// within the container config
+func WithIsService() CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.IsService = true
+
+ return nil
+ }
+}
+
// WithCreateWorkingDir tells Podman to create the container's working directory
// if it does not exist.
func WithCreateWorkingDir() CtrCreateOption {
@@ -2081,6 +2096,27 @@ func WithInfraContainer() PodCreateOption {
}
}
+// WithServiceContainer associates the specified service container ID with the pod.
+func WithServiceContainer(id string) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+
+ ctr, err := pod.runtime.LookupContainer(id)
+ if err != nil {
+ return fmt.Errorf("looking up service container: %w", err)
+ }
+
+ if err := ctr.addServicePodLocked(pod.ID()); err != nil {
+ return fmt.Errorf("associating service container %s with pod %s: %w", id, pod.ID(), err)
+ }
+
+ pod.config.ServiceContainerID = id
+ return nil
+ }
+}
+
// WithVolatile sets the volatile flag for the container storage.
// The option can potentially cause data loss when used on a container that must survive a machine reboot.
func WithVolatile() CtrCreateOption {
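Both WithIsService and WithServiceContainer follow libpod's functional-options pattern: an option is a closure that mutates the object under construction and refuses to run once the object has been finalized. A stripped-down sketch of the pattern, with simplified hypothetical types:

package main

import "fmt"

// Simplified, hypothetical stand-ins for libpod's Container and
// CtrCreateOption; the real types carry far more state.
type container struct {
	valid     bool
	isService bool
}

type ctrCreateOption func(*container) error

func withIsService() ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			// Mirrors define.ErrCtrFinalized: options must not run
			// after the container has been finalized.
			return fmt.Errorf("container already finalized")
		}
		c.isService = true
		return nil
	}
}

func newContainer(opts ...ctrCreateOption) (*container, error) {
	c := &container{}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	c.valid = true
	return c, nil
}

func main() {
	c, _ := newContainer(withIsService())
	fmt.Println(c.isService) // true
}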
diff --git a/libpod/pod.go b/libpod/pod.go
index 2211d5be7..3c8dc43d4 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -64,6 +64,13 @@ type PodConfig struct {
HasInfra bool `json:"hasInfra,omitempty"`
+ // ServiceContainerID is the ID of the main container of a service. A
+ // service consists of one or more pods. The service container is
+ // started before all pods and is stopped when the last pod stops.
+ // The service container allows for tracking and managing the entire
+ // life cycle of a service, which may be started via `podman-play-kube`.
+ ServiceContainerID string `json:"serviceContainerID,omitempty"`
+
// Time pod was created
CreatedTime time.Time `json:"created"`
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 73b28822b..eede896a9 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -75,6 +75,10 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
+ if err := p.maybeStartServiceContainer(ctx); err != nil {
+ return nil, err
+ }
+
// Before "regular" containers start in the pod, all init containers
// must have run and exited successfully.
if err := p.startInitContainers(ctx); err != nil {
@@ -197,6 +201,11 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
}
+
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
@@ -297,6 +306,10 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
}
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
@@ -443,6 +456,10 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
+ if err := p.maybeStartServiceContainer(ctx); err != nil {
+ return nil, err
+ }
+
allCtrs, err := p.runtime.state.PodContainers(p)
if err != nil {
return nil, err
@@ -530,6 +547,11 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
}
+
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 54eadf6b8..b13482722 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -40,7 +40,7 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID())
+ return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 62ec7df60..dcc3a044f 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -380,6 +380,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
+ if err := p.maybeRemoveServiceContainer(); err != nil {
+ return err
+ }
+
// Remove pod from state
if err := r.state.RemovePod(p); err != nil {
if removalErr != nil {
diff --git a/libpod/service.go b/libpod/service.go
new file mode 100644
index 000000000..ad147e87b
--- /dev/null
+++ b/libpod/service.go
@@ -0,0 +1,213 @@
+package libpod
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// A service consists of one or more pods. The service container is started
+// before all pods and is stopped when the last pod stops. The service
+// container allows for tracking and managing the entire life cycle of a
+// service, which may be started via `podman-play-kube`.
+type Service struct {
+ // Pods running as part of the service.
+ Pods []string `json:"servicePods"`
+}
+
+// hasServiceContainer indicates whether the pod is associated with a service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) hasServiceContainer() bool {
+ return p.config.ServiceContainerID != ""
+}
+
+// serviceContainer returns the pod's service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) serviceContainer() (*Container, error) {
+ id := p.config.ServiceContainerID
+ if id == "" {
+ return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+ }
+ return p.runtime.state.Container(id)
+}
+
+// ServiceContainer returns the service container.
+func (p *Pod) ServiceContainer() (*Container, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if err := p.updatePod(); err != nil {
+ return nil, err
+ }
+ return p.serviceContainer()
+}
+
+// addServicePodLocked adds the given pod ID to the service container's
+// state. Note that the method acquires the container lock.
+func (c *Container) addServicePodLocked(id string) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ c.state.Service.Pods = append(c.state.Service.Pods, id)
+ return c.save()
+}
+
+// isService returns true when the container is a service container.
+func (c *Container) isService() bool {
+ return c.config.IsService
+}
+
+// canStopServiceContainerLocked returns true if all pods of the service are
+// stopped. Note that the method acquires the container lock.
+func (c *Container) canStopServiceContainerLocked() (bool, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return false, err
+ }
+
+ if !c.isService() {
+ return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+ }
+
+ for _, id := range c.state.Service.Pods {
+ pod, err := c.runtime.LookupPod(id)
+ if err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ continue
+ }
+ return false, err
+ }
+
+ status, err := pod.GetPodStatus()
+ if err != nil {
+ return false, err
+ }
+
+ // We can only stop the service if all pods are done.
+ switch status {
+ case define.PodStateStopped, define.PodStateExited, define.PodStateErrored:
+ continue
+ default:
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// maybeStopServiceContainer checks whether the pod's service container can
+// be stopped, and stops it if so.
+func (p *Pod) maybeStopServiceContainer() error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+ // Checking whether the service can be stopped must be done in
+ // the runtime's work queue to avoid ABBA deadlocks in the
+ // pod->container->servicePods hierarchy.
+ p.runtime.queueWork(func() {
+ logrus.Debugf("Pod %s has a service %s: checking if it can be stopped", p.ID(), serviceCtr.ID())
+ canStop, err := serviceCtr.canStopServiceContainerLocked()
+ if err != nil {
+ logrus.Errorf("Checking whether service of container %s can be stopped: %v", serviceCtr.ID(), err)
+ return
+ }
+ if !canStop {
+ return
+ }
+ logrus.Debugf("Stopping service container %s", serviceCtr.ID())
+ if err := serviceCtr.Stop(); err != nil {
+ logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+ }
+ })
+ return nil
+}
+
+// maybeStartServiceContainer starts the pod's service container if it is
+// not already running.
+func (p *Pod) maybeStartServiceContainer(ctx context.Context) error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+
+ serviceCtr.lock.Lock()
+ defer serviceCtr.lock.Unlock()
+
+ if err := serviceCtr.syncContainer(); err != nil {
+ return err
+ }
+
+ if serviceCtr.state.State == define.ContainerStateRunning {
+ return nil
+ }
+
+ // Restart will reinit among other things.
+ return serviceCtr.restartWithTimeout(ctx, 0)
+}
+
+// canRemoveServiceContainerLocked returns true if all pods of the service
+// are removed. Note that the method acquires the container lock.
+func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return false, err
+ }
+
+ if !c.isService() {
+ return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+ }
+
+ for _, id := range c.state.Service.Pods {
+ if _, err := c.runtime.LookupPod(id); err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ continue
+ }
+ return false, err
+ }
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// maybeRemoveServiceContainer checks whether the pod's service container can
+// be removed, and removes it if so.
+func (p *Pod) maybeRemoveServiceContainer() error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+ // Checking whether the service can be removed must be done in
+ // the runtime's work queue to avoid ABBA deadlocks in the
+ // pod->container->servicePods hierarchy.
+ p.runtime.queueWork(func() {
+ logrus.Debugf("Pod %s has a service %s: checking if it can be removed", p.ID(), serviceCtr.ID())
+ canRemove, err := serviceCtr.canRemoveServiceContainerLocked()
+ if err != nil {
+ logrus.Errorf("Checking whether service of container %s can be removed: %v", serviceCtr.ID(), err)
+ return
+ }
+ if !canRemove {
+ return
+ }
+ timeout := uint(0)
+ logrus.Debugf("Removing service container %s", serviceCtr.ID())
+ if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
+ logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+ }
+ })
+ return nil
+}
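Both maybe* helpers defer the actual check to p.runtime.queueWork, whose implementation is not part of this diff. A minimal sketch of such a work queue, assuming a single background goroutine draining a channel of closures; because the closure runs on the worker goroutine, the service container's lock is acquired outside the pod lock chain that triggered the check:

package main

import (
	"fmt"
	"sync"
)

// workQueue is a hypothetical, trimmed-down work queue; the real runtime
// type looks different but follows the same idea.
type workQueue struct {
	jobs chan func()
	wg   sync.WaitGroup
}

func newWorkQueue() *workQueue {
	q := &workQueue{jobs: make(chan func(), 10)}
	q.wg.Add(1)
	go func() {
		defer q.wg.Done()
		for job := range q.jobs {
			job() // runs outside the caller's lock chain
		}
	}()
	return q
}

func (q *workQueue) queueWork(f func()) {
	q.jobs <- f
}

func (q *workQueue) shutdown() {
	close(q.jobs)
	q.wg.Wait()
}

func main() {
	q := newWorkQueue()
	q.queueWork(func() { fmt.Println("checking whether the service container can be stopped") })
	q.shutdown()
}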
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index bf7c33f2b..f1ba21650 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -54,6 +54,8 @@ type PlayKubeOptions struct {
LogOptions []string
// Start - don't start the pod if false
Start types.OptionalBool
+ // ServiceContainer - creates a service container that is started before all pods and stopped after them.
+ ServiceContainer bool
// Userns - define the user namespace to use.
Userns string
}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 019361694..420d51483 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -28,12 +28,54 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/ghodss/yaml"
+ "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yamlv2 "gopkg.in/yaml.v2"
)
-func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+// createServiceContainer creates a container that can later on
+// be associated with the pods of a K8s yaml. It will be started along with
+// the first pod.
+func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name string) (*libpod.Container, error) {
+ // Similar to infra containers, a service container uses the pause image.
+ image, err := generate.PullOrBuildInfraImage(ic.Libpod, "")
+ if err != nil {
+ return nil, fmt.Errorf("image for service container: %w", err)
+ }
+
+ ctrOpts := entities.ContainerCreateOptions{
+ // Inherited from infra containers
+ ImageVolume: "bind",
+ IsInfra: false,
+ MemorySwappiness: -1,
+ // No need to spin up slirp etc.
+ Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}},
+ }
+
+ // Create and fill out the runtime spec.
+ s := specgen.NewSpecGenerator(image, false)
+ if err := specgenutil.FillOutSpecGen(s, &ctrOpts, []string{}); err != nil {
+ return nil, fmt.Errorf("completing spec for service container: %w", err)
+ }
+ s.Name = name
+
+ runtimeSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, s, false, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating runtime spec for service container: %w", err)
+ }
+ opts = append(opts, libpod.WithIsService())
+
+ // Create a new libpod container based on the spec.
+ ctr, err := ic.Libpod.NewContainer(ctx, runtimeSpec, spec, false, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("creating service container: %w", err)
+ }
+
+ return ctr, nil
+}
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (_ *entities.PlayKubeReport, finalErr error) {
report := &entities.PlayKubeReport{}
validKinds := 0
@@ -67,6 +109,30 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read kube YAML")
}
+ // TODO: create constants for the various "kinds" of yaml files.
+ var serviceContainer *libpod.Container
+ if options.ServiceContainer && (kind == "Pod" || kind == "Deployment") {
+ // The name of the service container is the first 12
+ // characters of the yaml file's hash followed by the
+ // '-service' suffix to guarantee a predictable and
+ // discoverable name.
+ hash := digest.FromBytes(content).Encoded()
+ ctr, err := ic.createServiceContainer(ctx, hash[0:12]+"-service")
+ if err != nil {
+ return nil, err
+ }
+ serviceContainer = ctr
+ // Make sure to remove the container in case something goes wrong below.
+ defer func() {
+ if finalErr == nil {
+ return
+ }
+ if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false, nil); err != nil {
+ logrus.Errorf("Cleaning up service container after failure: %v", err)
+ }
+ }()
+ }
+
switch kind {
case "Pod":
var podYAML v1.Pod
@@ -90,7 +156,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
podYAML.Annotations[name] = val
}
- r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps)
+ r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@@ -104,7 +170,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
}
- r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps)
+ r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@@ -148,7 +214,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return report, nil
}
-func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
deploymentName string
podSpec v1.PodTemplateSpec
@@ -170,7 +236,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
// create "replicas" number of pods
for i = 0; i < numReplicas; i++ {
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
- podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps)
+ podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
}
@@ -179,7 +245,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, nil
}
-func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
writer io.Writer
playKubePod entities.PlayKubePod
@@ -374,6 +440,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}
+ if serviceContainer != nil {
+ podSpec.PodSpecGen.ServiceContainerID = serviceContainer.ID()
+ }
+
// Create the Pod
pod, err := generate.MakePod(&podSpec, ic.Libpod)
if err != nil {
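The service container's name is deterministic, which the system test below relies on ("352a88685060-service" is derived from the test's YAML). The naming scheme can be reproduced with go-digest:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	content := []byte("apiVersion: v1\nkind: Pod\n") // stand-in for the raw kube YAML
	// First 12 characters of the YAML's SHA-256 digest plus the "-service" suffix.
	hash := digest.FromBytes(content).Encoded()
	fmt.Println(hash[0:12] + "-service")
}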
diff --git a/pkg/specgen/generate/pause_image.go b/pkg/specgen/generate/pause_image.go
new file mode 100644
index 000000000..4aba230a3
--- /dev/null
+++ b/pkg/specgen/generate/pause_image.go
@@ -0,0 +1,89 @@
+package generate
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ buildahDefine "github.com/containers/buildah/define"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/podman/v4/libpod"
+ "github.com/containers/podman/v4/libpod/define"
+)
+
+// PullOrBuildInfraImage pulls down the specified image or the one set in
+// containers.conf. If none is set, it builds a local pause image.
+func PullOrBuildInfraImage(rt *libpod.Runtime, imageName string) (string, error) {
+ rtConfig, err := rt.GetConfigNoCopy()
+ if err != nil {
+ return "", err
+ }
+
+ if imageName == "" {
+ imageName = rtConfig.Engine.InfraImage
+ }
+
+ if imageName != "" {
+ _, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
+ if err != nil {
+ return "", err
+ }
+ return imageName, nil
+ }
+
+ name, err := buildPauseImage(rt, rtConfig)
+ if err != nil {
+ return "", fmt.Errorf("building local pause image: %w", err)
+ }
+ return name, nil
+}
+
+func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
+ version, err := define.GetVersion()
+ if err != nil {
+ return "", err
+ }
+ imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
+
+ // First check if the image has already been built.
+ if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
+ return imageName, nil
+ }
+
+ // Also look into the path as some distributions install catatonit in
+ // /usr/bin.
+ catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
+ if err != nil {
+ return "", fmt.Errorf("finding pause binary: %w", err)
+ }
+
+ buildContent := fmt.Sprintf(`FROM scratch
+COPY %s /catatonit
+ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
+
+ tmpF, err := ioutil.TempFile("", "pause.containerfile")
+ if err != nil {
+ return "", err
+ }
+ if _, err := tmpF.WriteString(buildContent); err != nil {
+ return "", err
+ }
+ if err := tmpF.Close(); err != nil {
+ return "", err
+ }
+ defer os.Remove(tmpF.Name())
+
+ buildOptions := buildahDefine.BuildOptions{
+ CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
+ Output: imageName,
+ Quiet: true,
+ IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
+ IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
+ }
+ if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
+ return "", err
+ }
+
+ return imageName, nil
+}
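PullOrBuildInfraImage resolves the image in a fixed precedence order: the explicitly requested name, then infra_image from containers.conf, then a locally built podman-pause image. A trimmed-down sketch of that precedence, with the pull and build steps elided and hypothetical names:

package main

import "fmt"

// resolveInfraImage mirrors the precedence implemented by
// PullOrBuildInfraImage: explicit name > configured name > local build.
func resolveInfraImage(explicit, configured string, build func() (string, error)) (string, error) {
	if explicit != "" {
		return explicit, nil // would be pulled if missing
	}
	if configured != "" {
		return configured, nil // would be pulled if missing
	}
	return build() // e.g. localhost/podman-pause:<version>-<build time>
}

func main() {
	name, err := resolveInfraImage("", "", func() (string, error) {
		return "localhost/podman-pause:4.1.0-0", nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
}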
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index fce32d688..5b7bb2b57 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -2,13 +2,8 @@ package generate
import (
"context"
- "fmt"
- "io/ioutil"
"net"
- "os"
- buildahDefine "github.com/containers/buildah/define"
- "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
@@ -17,98 +12,18 @@ import (
"github.com/sirupsen/logrus"
)
-func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
- version, err := define.GetVersion()
- if err != nil {
- return "", err
- }
- imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
-
- // First check if the image has already been built.
- if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
- return imageName, nil
- }
-
- // Also look into the path as some distributions install catatonit in
- // /usr/bin.
- catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
- if err != nil {
- return "", fmt.Errorf("finding pause binary: %w", err)
- }
-
- buildContent := fmt.Sprintf(`FROM scratch
-COPY %s /catatonit
-ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
-
- tmpF, err := ioutil.TempFile("", "pause.containerfile")
- if err != nil {
- return "", err
- }
- if _, err := tmpF.WriteString(buildContent); err != nil {
- return "", err
- }
- if err := tmpF.Close(); err != nil {
- return "", err
- }
- defer os.Remove(tmpF.Name())
-
- buildOptions := buildahDefine.BuildOptions{
- CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
- Output: imageName,
- Quiet: true,
- IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
- IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
- }
- if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
- return "", err
- }
-
- return imageName, nil
-}
-
-func pullOrBuildInfraImage(p *entities.PodSpec, rt *libpod.Runtime) error {
- if p.PodSpecGen.NoInfra {
- return nil
- }
-
- rtConfig, err := rt.GetConfigNoCopy()
- if err != nil {
- return err
- }
-
- // NOTE: we need pull down the infra image if it was explicitly set by
- // the user (or containers.conf) to the non-default one.
- imageName := p.PodSpecGen.InfraImage
- if imageName == "" {
- imageName = rtConfig.Engine.InfraImage
- }
-
- if imageName != "" {
- _, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
- if err != nil {
- return err
- }
- } else {
- name, err := buildPauseImage(rt, rtConfig)
- if err != nil {
- return fmt.Errorf("building local pause image: %w", err)
- }
- imageName = name
- }
-
- p.PodSpecGen.InfraImage = imageName
- p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
-
- return nil
-}
-
func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
if err := p.PodSpecGen.Validate(); err != nil {
return nil, err
}
- if err := pullOrBuildInfraImage(p, rt); err != nil {
- return nil, err
+ if !p.PodSpecGen.NoInfra {
+ imageName, err := PullOrBuildInfraImage(rt, p.PodSpecGen.InfraImage)
+ if err != nil {
+ return nil, err
+ }
+ p.PodSpecGen.InfraImage = imageName
+ p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
}
if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil {
@@ -180,6 +95,11 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithPodUser())
}
}
+
+ if len(p.ServiceContainerID) > 0 {
+ options = append(options, libpod.WithServiceContainer(p.ServiceContainerID))
+ }
+
if len(p.CgroupParent) > 0 {
options = append(options, libpod.WithPodCgroupParent(p.CgroupParent))
}
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 1bb64448f..603506241 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -204,6 +204,9 @@ type PodSpecGenerator struct {
PodStorageConfig
PodSecurityConfig
InfraContainerSpec *SpecGenerator `json:"-"`
+
+ // The ID of the pod's service container.
+ ServiceContainerID string `json:"serviceContainerID,omitempty"`
}
type PodResourceConfig struct {
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index 39982848f..4250f2680 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -408,19 +408,6 @@ EOF
run_podman pod rm test
}
-# Wait for the pod (1st arg) to transition into the state (2nd arg)
-function _ensure_pod_state() {
- for i in {0..5}; do
- run_podman pod inspect $1 --format "{{.State}}"
- if [[ $output == "$2" ]]; then
- break
- fi
- sleep 0.5
- done
-
- is "$output" "$2" "unexpected pod state"
-}
-
@test "pod exit policies" {
# Test setting exit policies
run_podman pod create
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index 7988b26a4..2e5327a85 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -100,6 +100,61 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman pod rm -t 0 -f test_pod
}
+@test "podman play --service-container" {
+ skip_if_remote "service containers only work locally"
+
+ TESTDIR=$PODMAN_TMPDIR/testdir
+ mkdir -p $TESTDIR
+
+yaml="
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: test
+ name: test_pod
+spec:
+ containers:
+ - command:
+ - top
+ image: $IMAGE
+ name: test
+ resources: {}
+"
+
+ echo "$yaml" > $PODMAN_TMPDIR/test.yaml
+ run_podman play kube --service-container=true $PODMAN_TMPDIR/test.yaml
+
+ # Make sure that the service container exists and runs.
+ run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+ is "$output" "true"
+
+ # Stop the *main* container and make sure that
+ # 1) The pod transitions to Exited
+ # 2) The service container is stopped
+ # 3) The service container is marked as a service container
+ run_podman stop test_pod-test
+ _ensure_pod_state test_pod Exited
+ run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+ is "$output" "false"
+ run_podman container inspect "352a88685060-service" --format "{{.IsService}}"
+ is "$output" "true"
+
+ # Restart the pod, make sure the service is running again
+ run_podman pod restart test_pod
+ run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+ is "$output" "true"
+
+ # Kill the pod and make sure the service is not running
+ run_podman pod kill test_pod
+ run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+ is "$output" "false"
+
+ # Remove the pod and make sure the service is removed along with it
+ run_podman pod rm test_pod
+ run_podman 1 container exists "352a88685060-service"
+}
+
@test "podman play --network" {
TESTDIR=$PODMAN_TMPDIR/testdir
mkdir -p $TESTDIR
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 138d668f4..072131202 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -392,6 +392,19 @@ function pause_image() {
echo "localhost/podman-pause:$output"
}
+# Wait for the pod (1st arg) to transition into the state (2nd arg)
+function _ensure_pod_state() {
+ for i in {0..5}; do
+ run_podman pod inspect $1 --format "{{.State}}"
+ if [[ $output == "$2" ]]; then
+ break
+ fi
+ sleep 0.5
+ done
+
+ is "$output" "$2" "unexpected pod state"
+}
+
###########################
# _add_label_if_missing # make sure skip messages include rootless/remote
###########################