path: root/libpod/pod_api.go
author     Valentin Rothberg <vrothberg@redhat.com>  2022-04-13 16:21:21 +0200
committer  Valentin Rothberg <vrothberg@redhat.com>  2022-05-02 13:29:59 +0200
commit     4eff0c8cf284a6007122aec731e4d97059750166 (patch)
tree       cdbfee34bd64bb295556667129a6a3c5db9b4612 /libpod/pod_api.go
parent     77d872ea38ec7b685ec99efe6688d1793c9fa256 (diff)
pod: add exit policies
Add the notion of an "exit policy" to a pod. This policy controls the behaviour when the last container of a pod exits. Initially, there are two policies:

 - "continue" : the pod continues running. This is the default policy when creating a pod.
 - "stop" : stop the pod when the last container exits. This is the default behaviour for `play kube`.

In order to implement the deferred stop of a pod, add a worker queue to the libpod runtime. The queue picks up work items and, in this case, helps resolve deadlocks that would otherwise occur if we attempted to stop a pod during container cleanup.

Note that the default restart policy of `play kube` is "Always". Hence, in order to really solve #13464, the YAML files must set a custom restart policy; the tests use "OnFailure".

Fixes: #13464

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
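The worker queue itself lives outside this file, so it does not appear in the diff below. As a rough illustration of the pattern the commit message describes, here is a minimal, self-contained sketch of a channel-backed queue drained by a background goroutine; the names (workQueue, queueWork, shutdown) are assumptions made for this sketch and are not libpod's actual API.

// Minimal sketch of a worker queue: a buffered channel of work items
// drained by a single background goroutine. Queuing a deferred pod stop
// here lets it run after container cleanup has released its locks.
package main

import (
	"fmt"
	"sync"
)

type workQueue struct {
	work chan func()
	wg   sync.WaitGroup
}

func newWorkQueue(buffer int) *workQueue {
	q := &workQueue{work: make(chan func(), buffer)}
	q.wg.Add(1)
	go func() {
		defer q.wg.Done()
		for item := range q.work {
			item() // run items one at a time, outside the caller's call path
		}
	}()
	return q
}

// queueWork schedules fn to run asynchronously on the worker goroutine.
func (q *workQueue) queueWork(fn func()) {
	q.work <- fn
}

// shutdown stops accepting work and waits for queued items to finish.
func (q *workQueue) shutdown() {
	close(q.work)
	q.wg.Wait()
}

func main() {
	q := newWorkQueue(10)
	q.queueWork(func() { fmt.Println("deferred pod stop would run here") })
	q.shutdown()
}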
Diffstat (limited to 'libpod/pod_api.go')
-rw-r--r--  libpod/pod_api.go | 51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index ba30d878e..73b28822b 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -2,6 +2,7 @@ package libpod
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/containers/common/pkg/cgroups"
 	"github.com/containers/podman/v4/libpod/define"
@@ -134,6 +135,10 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
+	return p.stopWithTimeout(ctx, cleanup, timeout)
+}
+
+func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
 	if !p.valid {
 		return nil, define.ErrPodRemoved
 	}
@@ -195,6 +200,51 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
 	return nil, nil
 }
 
+// Stops the pod if only the infra container remains running.
+func (p *Pod) stopIfOnlyInfraRemains(ctx context.Context, ignoreID string) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	infraID := ""
+
+	if p.HasInfraContainer() {
+		infra, err := p.infraContainer()
+		if err != nil {
+			return err
+		}
+		infraID = infra.ID()
+	}
+
+	allCtrs, err := p.runtime.state.PodContainers(p)
+	if err != nil {
+		return err
+	}
+
+	for _, ctr := range allCtrs {
+		if ctr.ID() == infraID || ctr.ID() == ignoreID {
+			continue
+		}
+
+		state, err := ctr.State()
+		if err != nil {
+			return fmt.Errorf("getting state of container %s: %w", ctr.ID(), err)
+		}
+
+		switch state {
+		case define.ContainerStateExited,
+			define.ContainerStateRemoving,
+			define.ContainerStateStopping,
+			define.ContainerStateUnknown:
+			continue
+		default:
+			return nil
+		}
+	}
+
+	_, err = p.stopWithTimeout(ctx, true, -1)
+	return err
+}
+
 // Cleanup cleans up all containers within a pod that have stopped.
 // All containers are cleaned up independently. An error with one container will
 // not prevent other containers being cleaned up.
@@ -661,6 +711,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
 		Namespace:     p.Namespace(),
 		Created:       p.CreatedTime(),
 		CreateCommand: p.config.CreateCommand,
+		ExitPolicy:    string(p.config.ExitPolicy),
 		State:         podState,
 		Hostname:      p.config.Hostname,
 		Labels:        p.Labels(),
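To connect the pieces shown above: the "stop" exit policy only triggers once nothing but the infra container is left running. Below is a small, self-contained sketch of that decision, mirroring the loop in stopIfOnlyInfraRemains; the types and names are illustrative (not libpod's), and for brevity it models only running/not-running, whereas the real function also skips containers that are stopping, being removed, or in an unknown state.

// Illustrative sketch of the exit-policy decision; not libpod code.
package main

import "fmt"

type ctr struct {
	id      string
	infra   bool
	running bool
}

// shouldStopPod mirrors stopIfOnlyInfraRemains: skip the infra container and
// the container currently being cleaned up (ignoreID); if any other container
// is still running, leave the pod alone. Only the "stop" policy ever stops it.
func shouldStopPod(exitPolicy string, ctrs []ctr, ignoreID string) bool {
	if exitPolicy != "stop" {
		return false
	}
	for _, c := range ctrs {
		if c.infra || c.id == ignoreID {
			continue
		}
		if c.running {
			return false
		}
	}
	return true
}

func main() {
	ctrs := []ctr{
		{id: "infra", infra: true, running: true},
		{id: "web", running: false},
		{id: "worker", running: false},
	}
	fmt.Println(shouldStopPod("stop", ctrs, "worker"))     // true: only infra is left running
	fmt.Println(shouldStopPod("continue", ctrs, "worker")) // false: default policy keeps the pod up
}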