author     Valentin Rothberg <vrothberg@redhat.com>   2022-05-11 15:02:06 +0200
committer  Valentin Rothberg <vrothberg@redhat.com>   2022-05-17 10:18:58 +0200
commit     8684d41e387ae40cc64cd513bbc3f7ac319360f4
tree       11bcd5086463c93ec5f14de10d47fff85ffa26eb /libpod
parent     eb26fa45f1326191dea27f2afabf82cb8b934140
k8systemd: run k8s workloads in systemd
Support running `podman play kube` in systemd by exploiting the
previously added "service containers". During `play kube`, a service
container is started before all the pods and containers, and is stopped
last. The service container communicates its conmon PID via sdnotify.
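For `Type=notify` units, the `MAINPID` sdnotify message is what lets systemd track a process other than the one it forked. A rough shell equivalent of the handshake (illustrative only; `$CONMON_PID` is a placeholder, and Podman's actual code path differs):
```
# Report conmon's PID as the unit's main process and signal readiness.
# Requires NotifyAccess=all on the unit, since the sender is not the
# process systemd forked.
$ systemd-notify --ready "MAINPID=$CONMON_PID"
```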
Add a new systemd template to dispatch such k8s workloads. The argument
of the template is the path to the k8s file. Note that the path must be
escaped with systemd-escape, or systemd will reject the unit name:
Let's assume we have a `top.yaml` file in the home directory:
```
$ escaped=$(systemd-escape ~/top.yaml)
$ systemctl --user start podman-play-kube@$escaped.service
```
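For reference, a minimal sketch of what such a template unit (e.g. `podman-play-kube@.service`) could look like. This is an approximation, not necessarily the unit shipped by Podman; in particular the `--replace`, `--service-container`, and `--down` flags are assumptions based on this series:
```
[Unit]
Description=Run a Kubernetes YAML workload via podman play kube
Wants=network-online.target
After=network-online.target

[Service]
# conmon's PID is reported via sdnotify, so systemd must accept
# notifications from processes other than the one it forked.
Type=notify
NotifyAccess=all
# %I is the unescaped instance name, i.e. the path passed to systemd-escape.
ExecStart=/usr/bin/podman play kube --replace --service-container=true %I
ExecStop=/usr/bin/podman play kube --down %I

[Install]
WantedBy=default.target
```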
Closes: https://issues.redhat.com/browse/RUN-1287
Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
Diffstat (limited to 'libpod')
-rw-r--r--   libpod/container_inspect.go  |  2
-rw-r--r--   libpod/container_validate.go |  2
-rw-r--r--   libpod/runtime_ctr.go        | 20
-rw-r--r--   libpod/service.go            | 15

4 files changed, 33 insertions(+), 6 deletions(-)
```
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 5d809644d..93240812d 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -171,7 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
 		Mounts:       inspectMounts,
 		Dependencies: c.Dependencies(),
 		IsInfra:      c.IsInfra(),
-		IsService:    c.isService(),
+		IsService:    c.IsService(),
 	}
 
 	if c.state.ConfigPath != "" {
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index d939c94e6..cfbdd2b1e 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -31,7 +31,7 @@ func (c *Container) validate() error {
 
 	// A container cannot be marked as an infra and service container at
 	// the same time.
-	if c.IsInfra() && c.isService() {
+	if c.IsInfra() && c.IsService() {
 		return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
 	}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index df7174ac6..0119cb2e5 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -644,6 +644,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
 		return err
 	}
 
+	if c.IsService() {
+		canStop, err := c.canStopServiceContainer()
+		if err != nil {
+			return err
+		}
+		if !canStop {
+			return fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
+		}
+	}
+
 	// If we're not force-removing, we need to check if we're in a good
 	// state to remove.
 	if !force {
@@ -903,6 +913,16 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
 		}
 	}
 
+	if c.IsService() {
+		canStop, err := c.canStopServiceContainer()
+		if err != nil {
+			return id, err
+		}
+		if !canStop {
+			return id, fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
+		}
+	}
+
 	var cleanupErr error
 
 	// Remove the container from the state
 	if c.config.Pod != "" {
diff --git a/libpod/service.go b/libpod/service.go
index ad147e87b..c14f5e51d 100644
--- a/libpod/service.go
+++ b/libpod/service.go
@@ -54,11 +54,12 @@ func (c *Container) addServicePodLocked(id string) error {
 	return c.save()
 }
 
-func (c *Container) isService() bool {
+// IsService returns true when the container is a "service container".
+func (c *Container) IsService() bool {
 	return c.config.IsService
 }
 
-// canStopServiceContainer returns true if all pods of the service are stopped.
+// canStopServiceContainerLocked returns true if all pods of the service are stopped.
 // Note that the method acquires the container lock.
 func (c *Container) canStopServiceContainerLocked() (bool, error) {
 	c.lock.Lock()
@@ -67,10 +68,16 @@ func (c *Container) canStopServiceContainerLocked() (bool, error) {
 		return false, err
 	}
 
-	if !c.isService() {
+	if !c.IsService() {
 		return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
 	}
 
+	return c.canStopServiceContainer()
+}
+
+// canStopServiceContainer returns true if all pods of the service are stopped.
+// Note that the method expects the container to be locked.
+func (c *Container) canStopServiceContainer() (bool, error) {
 	for _, id := range c.state.Service.Pods {
 		pod, err := c.runtime.LookupPod(id)
 		if err != nil {
@@ -163,7 +170,7 @@ func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
 		return false, err
 	}
 
-	if !c.isService() {
+	if !c.IsService() {
 		return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
 	}
```
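With the new checks in `removeContainer()` and `evictContainer()`, a service container refuses to go away while any of its pods still exist. A hypothetical session (IDs are placeholders, not real output):
```
$ podman rm <service-container-id>
Error: container <service-container-id> is the service container of pod(s) <pod-id> and cannot be removed without removing the pod(s)
```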