Diffstat (limited to 'libpod')
-rw-r--r--  libpod/container.go                 |   8
-rw-r--r--  libpod/container_config.go          |   3
-rw-r--r--  libpod/container_inspect.go         |   1
-rw-r--r--  libpod/container_internal_linux.go  |   2
-rw-r--r--  libpod/container_validate.go        |   8
-rw-r--r--  libpod/define/container.go          |   2
-rw-r--r--  libpod/define/container_inspect.go  |   1
-rw-r--r--  libpod/diff.go                      |  23
-rw-r--r--  libpod/events.go                    |   4
-rw-r--r--  libpod/events/config.go             |   2
-rw-r--r--  libpod/events/events.go             |   2
-rw-r--r--  libpod/info.go                      |  11
-rw-r--r--  libpod/info_test.go                 |   8
-rw-r--r--  libpod/options.go                   |  38
-rw-r--r--  libpod/pod.go                       |   7
-rw-r--r--  libpod/pod_api.go                   |  22
-rw-r--r--  libpod/runtime.go                   |   4
-rw-r--r--  libpod/runtime_ctr.go               |  26
-rw-r--r--  libpod/runtime_img.go               |   2
-rw-r--r--  libpod/runtime_pod_linux.go         |   4
-rw-r--r--  libpod/runtime_renumber.go          |   2
-rw-r--r--  libpod/service.go                   | 220
22 files changed, 370 insertions, 30 deletions
diff --git a/libpod/container.go b/libpod/container.go
index d7af9a100..64b4453fb 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -211,6 +211,14 @@ type ContainerState struct {
// network and an interface names
NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
+ // Service indicates that the container is the service container of a
+ // service. A service consists of one or more pods. The service
+ // container is started before all pods and is stopped when the last
+ // pod stops. The service container allows for tracking and managing
+ // the entire life cycle of a service which may be started via
+ // `podman-play-kube`.
+ Service Service
+
// containerPlatformState holds platform-specific container state.
containerPlatformState
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 371a1dec0..3e85ad4d5 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -382,6 +382,9 @@ type ContainerMiscConfig struct {
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
+ // IsService is a bool indicating whether this container is a service container used for
+ // tracking the life cycle of a K8s service.
+ IsService bool `json:"isService"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
// Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 76e0e9e13..93240812d 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -171,6 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
Mounts: inspectMounts,
Dependencies: c.Dependencies(),
IsInfra: c.IsInfra(),
+ IsService: c.IsService(),
}
if c.state.ConfigPath != "" {
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 4742b22ab..d7683cce9 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -3282,7 +3282,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
return err
}
stat := st.Sys().(*syscall.Stat_t)
- atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) // nolint: unconvert
if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
return err
}
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index c6c9a4c6d..cfbdd2b1e 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -1,6 +1,8 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -27,6 +29,12 @@ func (c *Container) validate() error {
return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
}
+ // A container cannot be marked as an infra and service container at
+ // the same time.
+ if c.IsInfra() && c.IsService() {
+ return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
+ }
+
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {
diff --git a/libpod/define/container.go b/libpod/define/container.go
index bb44a6a4a..ba939578f 100644
--- a/libpod/define/container.go
+++ b/libpod/define/container.go
@@ -35,4 +35,6 @@ const (
// OneShotInitContainer is a container that only runs as init once
// and is then deleted.
OneShotInitContainer = "once"
+ // ContainerInitPath is the default path of the mounted container init.
+ ContainerInitPath = "/run/podman-init"
)
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 6cdffb8b7..e7b82d654 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -683,6 +683,7 @@ type InspectContainerData struct {
NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"`
Namespace string `json:"Namespace"`
IsInfra bool `json:"IsInfra"`
+ IsService bool `json:"IsService"`
Config *InspectContainerConfig `json:"Config"`
HostConfig *InspectContainerHostConfig `json:"HostConfig"`
}
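
The new IsService field surfaces in the inspect JSON right next to IsInfra, so callers can detect service containers without importing libpod. Below is a minimal sketch (an illustration, not part of this change) that shells out to `podman container inspect` and decodes only the fields of interest; the `Id` and `IsInfra` keys already exist in the inspect output, and `IsService` is the key added here.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: inspect-service <container>")
		os.Exit(1)
	}
	// `podman container inspect` prints a JSON array with one object per container.
	out, err := exec.Command("podman", "container", "inspect", os.Args[1]).Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "inspect failed:", err)
		os.Exit(1)
	}
	var ctrs []struct {
		ID        string `json:"Id"`
		IsInfra   bool   `json:"IsInfra"`
		IsService bool   `json:"IsService"`
	}
	if err := json.Unmarshal(out, &ctrs); err != nil {
		fmt.Fprintln(os.Stderr, "decode failed:", err)
		os.Exit(1)
	}
	for _, c := range ctrs {
		fmt.Printf("%s infra=%v service=%v\n", c.ID, c.IsInfra, c.IsService)
	}
}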
diff --git a/libpod/diff.go b/libpod/diff.go
index 794b26b48..86fa063ec 100644
--- a/libpod/diff.go
+++ b/libpod/diff.go
@@ -8,17 +8,18 @@ import (
)
var initInodes = map[string]bool{
- "/dev": true,
- "/etc/hostname": true,
- "/etc/hosts": true,
- "/etc/resolv.conf": true,
- "/proc": true,
- "/run": true,
- "/run/notify": true,
- "/run/.containerenv": true,
- "/run/secrets": true,
- "/sys": true,
- "/etc/mtab": true,
+ "/dev": true,
+ "/etc/hostname": true,
+ "/etc/hosts": true,
+ "/etc/resolv.conf": true,
+ "/proc": true,
+ "/run": true,
+ "/run/notify": true,
+ "/run/.containerenv": true,
+ "/run/secrets": true,
+ define.ContainerInitPath: true,
+ "/sys": true,
+ "/etc/mtab": true,
}
// GetDiff returns the differences between the two images, layers, or containers
diff --git a/libpod/events.go b/libpod/events.go
index 39f5786a4..f09d8402a 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -89,8 +89,8 @@ func (p *Pod) newPodEvent(status events.Status) {
}
}
-// newSystemEvent creates a new event for libpod as a whole.
-func (r *Runtime) newSystemEvent(status events.Status) {
+// NewSystemEvent creates a new event for libpod as a whole.
+func (r *Runtime) NewSystemEvent(status events.Status) {
e := events.NewEvent(status)
e.Type = events.System
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 00cdca007..2e7016136 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -98,6 +98,8 @@ type Type string
// Status describes the actual event action (stop, start, create, kill)
type Status string
+// When updating this list below please also update the shell completion list in
+// cmd/podman/common/completion.go and the StringToXXX function in events.go.
const (
// Container - event is related to containers
Container Type = "container"
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 04417fd8d..e83c2efee 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -150,6 +150,8 @@ func StringToStatus(name string) (Status, error) {
switch name {
case Attach.String():
return Attach, nil
+ case AutoUpdate.String():
+ return AutoUpdate, nil
case Build.String():
return Build, nil
case Checkpoint.String():
diff --git a/libpod/info.go b/libpod/info.go
index 321680a81..bc49a6cc9 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -406,26 +406,25 @@ func getCPUUtilization() (*define.CPUUsage, error) {
}
defer f.Close()
scanner := bufio.NewScanner(f)
- // Read firt line of /proc/stat
+ // Read first line of /proc/stat that has entries for system ("cpu" line)
for scanner.Scan() {
break
}
// column 1 is user, column 3 is system, column 4 is idle
- stats := strings.Split(scanner.Text(), " ")
+ stats := strings.Fields(scanner.Text())
return statToPercent(stats)
}
func statToPercent(stats []string) (*define.CPUUsage, error) {
- // There is always an extra space between cpu and the first metric
- userTotal, err := strconv.ParseFloat(stats[2], 64)
+ userTotal, err := strconv.ParseFloat(stats[1], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse user value %q", stats[1])
}
- systemTotal, err := strconv.ParseFloat(stats[4], 64)
+ systemTotal, err := strconv.ParseFloat(stats[3], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse system value %q", stats[3])
}
- idleTotal, err := strconv.ParseFloat(stats[5], 64)
+ idleTotal, err := strconv.ParseFloat(stats[4], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[4])
}
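
The index shift above falls out of switching from strings.Split to strings.Fields: the aggregate "cpu" line in /proc/stat pads the label with an extra space, so Split produced an empty element (hence the old stats[2]/stats[4]/stats[5]), while Fields collapses runs of whitespace so user, system, and idle land at indices 1, 3, and 4. A minimal, self-contained sketch of the same parsing (assumes a Linux /proc; this is not libpod's code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/stat")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	if !scanner.Scan() {
		fmt.Fprintln(os.Stderr, "empty /proc/stat")
		os.Exit(1)
	}
	// Fields collapses the double space after "cpu", so the first line splits
	// into: cpu user nice system idle iowait ...
	fields := strings.Fields(scanner.Text())

	parse := func(s string) float64 {
		v, err := strconv.ParseFloat(s, 64)
		if err != nil {
			fmt.Fprintln(os.Stderr, "parse:", err)
			os.Exit(1)
		}
		return v
	}
	user, system, idle := parse(fields[1]), parse(fields[3]), parse(fields[4])
	fmt.Printf("user=%.0f system=%.0f idle=%.0f\n", user, system, idle)
}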
diff --git a/libpod/info_test.go b/libpod/info_test.go
index 909b573c0..b0e4bf8c0 100644
--- a/libpod/info_test.go
+++ b/libpod/info_test.go
@@ -20,7 +20,7 @@ func Test_statToPercent(t *testing.T) {
}{
{
name: "GoodParse",
- args: args{in0: []string{"cpu", " ", "33628064", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ args: args{in0: []string{"cpu", "33628064", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
want: &define.CPUUsage{
UserPercent: 2.48,
SystemPercent: 0.71,
@@ -30,19 +30,19 @@ func Test_statToPercent(t *testing.T) {
},
{
name: "BadUserValue",
- args: args{in0: []string{"cpu", " ", "k", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ args: args{in0: []string{"cpu", "k", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
want: nil,
wantErr: assert.Error,
},
{
name: "BadSystemValue",
- args: args{in0: []string{"cpu", " ", "33628064", "27537", "k", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ args: args{in0: []string{"cpu", "33628064", "27537", "k", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
want: nil,
wantErr: assert.Error,
},
{
name: "BadIdleValue",
- args: args{in0: []string{"cpu", " ", "33628064", "27537", "9696996", "k", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ args: args{in0: []string{"cpu", "33628064", "27537", "9696996", "k", "588142", "4775073", "2789228", "0", "598711", "0"}},
want: nil,
wantErr: assert.Error,
},
diff --git a/libpod/options.go b/libpod/options.go
index 9b83cb76a..feb89510f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"net"
"os"
"path/filepath"
@@ -1477,7 +1478,7 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
}
}
-// withIsInfra allows us to dfferentiate between infra containers and regular containers
+// withIsInfra allows us to differentiate between infra containers and other containers
// within the container config
func withIsInfra() CtrCreateOption {
return func(ctr *Container) error {
@@ -1491,6 +1492,20 @@ func withIsInfra() CtrCreateOption {
}
}
+// WithIsService allows us to differentiate between service containers and other containers
+// within the container config
+func WithIsService() CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.IsService = true
+
+ return nil
+ }
+}
+
// WithCreateWorkingDir tells Podman to create the container's working directory
// if it does not exist.
func WithCreateWorkingDir() CtrCreateOption {
@@ -2081,6 +2096,27 @@ func WithInfraContainer() PodCreateOption {
}
}
+// WithServiceContainer associates the specified service container ID with the pod.
+func WithServiceContainer(id string) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+
+ ctr, err := pod.runtime.LookupContainer(id)
+ if err != nil {
+ return fmt.Errorf("looking up service container: %w", err)
+ }
+
+ if err := ctr.addServicePodLocked(pod.ID()); err != nil {
+ return fmt.Errorf("associating service container %s with pod %s: %w", id, pod.ID(), err)
+ }
+
+ pod.config.ServiceContainerID = id
+ return nil
+ }
+}
+
// WithVolatile sets the volatile flag for the container storage.
// The option can potentially cause data loss when used on a container that must survive a machine reboot.
func WithVolatile() CtrCreateOption {
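
WithIsService and WithServiceContainer follow libpod's functional-option pattern (CtrCreateOption / PodCreateOption): each option is a closure that mutates the object's config at create time and refuses to run once the object is finalized. The sketch below is a self-contained illustration of that pattern with stand-in types, not libpod's API, to show how an option such as WithIsService flips a config flag and how finalization is enforced.

package main

import (
	"errors"
	"fmt"
)

var errFinalized = errors.New("object is finalized and cannot be modified")

// container is a stand-in for libpod's Container; only the fields the
// sketch needs are present.
type container struct {
	valid     bool // set once the container is finalized
	isService bool
}

// ctrCreateOption mirrors the shape of libpod's CtrCreateOption.
type ctrCreateOption func(*container) error

// withIsService marks the container as a service container, mirroring the
// behavior of the real WithIsService option above.
func withIsService() ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			return errFinalized
		}
		c.isService = true
		return nil
	}
}

// newContainer applies all options, then finalizes the container so that
// later option calls are rejected.
func newContainer(opts ...ctrCreateOption) (*container, error) {
	c := &container{}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	c.valid = true
	return c, nil
}

func main() {
	c, err := newContainer(withIsService())
	if err != nil {
		panic(err)
	}
	fmt.Println("service container:", c.isService) // true
}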
diff --git a/libpod/pod.go b/libpod/pod.go
index 2211d5be7..3c8dc43d4 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -64,6 +64,13 @@ type PodConfig struct {
HasInfra bool `json:"hasInfra,omitempty"`
+ // ServiceContainerID is the ID of the main container of a service. A
+ // service consists of one or more pods. The service container is
+ // started before all pods and is stopped when the last pod stops.
+ // The service container allows for tracking and managing the entire
+ // life cycle of a service which may be started via `podman-play-kube`.
+ ServiceContainerID string `json:"serviceContainerID,omitempty"`
+
// Time pod was created
CreatedTime time.Time `json:"created"`
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 73b28822b..eede896a9 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -75,6 +75,10 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
+ if err := p.maybeStartServiceContainer(ctx); err != nil {
+ return nil, err
+ }
+
// Before "regular" containers start in the pod, all init containers
// must have run and exited successfully.
if err := p.startInitContainers(ctx); err != nil {
@@ -197,6 +201,11 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
}
+
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
@@ -297,6 +306,10 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
}
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
@@ -443,6 +456,10 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
+ if err := p.maybeStartServiceContainer(ctx); err != nil {
+ return nil, err
+ }
+
allCtrs, err := p.runtime.state.PodContainers(p)
if err != nil {
return nil, err
@@ -530,6 +547,11 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
}
+
+ if err := p.maybeStopServiceContainer(); err != nil {
+ return nil, err
+ }
+
return nil, nil
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index f4cd9cf00..4efa7b8e8 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -349,7 +349,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// it will try to use existing XDG_RUNTIME_DIR
// if current user has no write access to XDG_RUNTIME_DIR we will fail later
if err := unix.Access(runtime.storageConfig.RunRoot, unix.W_OK); err != nil {
- msg := "XDG_RUNTIME_DIR is pointing to a path which is not writable. Most likely podman will fail."
+ msg := fmt.Sprintf("RunRoot is pointing to a path (%s) which is not writable. Most likely podman will fail.", runtime.storageConfig.RunRoot)
if errors.Is(err, os.ErrNotExist) {
// if dir does not exists try to create it
if err := os.MkdirAll(runtime.storageConfig.RunRoot, 0700); err != nil {
@@ -930,7 +930,7 @@ func (r *Runtime) refresh(alivePath string) error {
}
defer file.Close()
- r.newSystemEvent(events.Refresh)
+ r.NewSystemEvent(events.Refresh)
return nil
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index df7174ac6..2eaa77572 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -644,6 +644,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
return err
}
+ if c.IsService() {
+ canStop, err := c.canStopServiceContainer()
+ if err != nil {
+ return err
+ }
+ if !canStop {
+ return fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
+ }
+ }
+
// If we're not force-removing, we need to check if we're in a good
// state to remove.
if !force {
@@ -732,7 +742,11 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// after setting the state to ContainerStateRemoving will prevent that the container is
// restarted
if err := c.removeAllExecSessions(); err != nil {
- return err
+ if cleanupErr == nil {
+ cleanupErr = err
+ } else {
+ logrus.Errorf("Remove exec sessions: %v", err)
+ }
}
// Stop the container's storage
@@ -903,6 +917,16 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
}
}
+ if c.IsService() {
+ canStop, err := c.canStopServiceContainer()
+ if err != nil {
+ return id, err
+ }
+ if !canStop {
+ return id, fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
+ }
+ }
+
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 54eadf6b8..b13482722 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -40,7 +40,7 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID())
+ return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 62ec7df60..dcc3a044f 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -380,6 +380,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
+ if err := p.maybeRemoveServiceContainer(); err != nil {
+ return err
+ }
+
// Remove pod from state
if err := r.state.RemovePod(p); err != nil {
if removalErr != nil {
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 17e1d97e5..db055f40b 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -71,7 +71,7 @@ func (r *Runtime) renumberLocks() error {
}
}
- r.newSystemEvent(events.Renumber)
+ r.NewSystemEvent(events.Renumber)
return nil
}
diff --git a/libpod/service.go b/libpod/service.go
new file mode 100644
index 000000000..c14f5e51d
--- /dev/null
+++ b/libpod/service.go
@@ -0,0 +1,220 @@
+package libpod
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// A service consists of one or more pods. The service container is started
+// before all pods and is stopped when the last pod stops. The service
+// container allows for tracking and managing the entire life cycle of a
+// service which may be started via `podman-play-kube`.
+type Service struct {
+ // Pods running as part of the service.
+ Pods []string `json:"servicePods"`
+}
+
+// Indicates whether the pod is associated with a service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) hasServiceContainer() bool {
+ return p.config.ServiceContainerID != ""
+}
+
+// Returns the pod's service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) serviceContainer() (*Container, error) {
+ id := p.config.ServiceContainerID
+ if id == "" {
+ return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+ }
+ return p.runtime.state.Container(id)
+}
+
+// ServiceContainer returns the service container.
+func (p *Pod) ServiceContainer() (*Container, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if err := p.updatePod(); err != nil {
+ return nil, err
+ }
+ return p.serviceContainer()
+}
+
+func (c *Container) addServicePodLocked(id string) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ c.state.Service.Pods = append(c.state.Service.Pods, id)
+ return c.save()
+}
+
+// IsService returns true when the container is a "service container".
+func (c *Container) IsService() bool {
+ return c.config.IsService
+}
+
+// canStopServiceContainerLocked returns true if all pods of the service are stopped.
+// Note that the method acquires the container lock.
+func (c *Container) canStopServiceContainerLocked() (bool, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return false, err
+ }
+
+ if !c.IsService() {
+ return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+ }
+
+ return c.canStopServiceContainer()
+}
+
+// canStopServiceContainer returns true if all pods of the service are stopped.
+// Note that the method expects the container to be locked.
+func (c *Container) canStopServiceContainer() (bool, error) {
+ for _, id := range c.state.Service.Pods {
+ pod, err := c.runtime.LookupPod(id)
+ if err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ continue
+ }
+ return false, err
+ }
+
+ status, err := pod.GetPodStatus()
+ if err != nil {
+ return false, err
+ }
+
+ // We can only stop the service if all pods are done.
+ switch status {
+ case define.PodStateStopped, define.PodStateExited, define.PodStateErrored:
+ continue
+ default:
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// Checks whether the service container can be stopped and, if so, stops it.
+func (p *Pod) maybeStopServiceContainer() error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+ // Checking whether the service can be stopped must be done in
+ // the runtime's work queue to resolve ABBA deadlocks in the
+ // pod->container->servicePods hierarchy.
+ p.runtime.queueWork(func() {
+ logrus.Debugf("Pod %s has a service %s: checking if it can be stopped", p.ID(), serviceCtr.ID())
+ canStop, err := serviceCtr.canStopServiceContainerLocked()
+ if err != nil {
+ logrus.Errorf("Checking whether service of container %s can be stopped: %v", serviceCtr.ID(), err)
+ return
+ }
+ if !canStop {
+ return
+ }
+ logrus.Debugf("Stopping service container %s", serviceCtr.ID())
+ if err := serviceCtr.Stop(); err != nil {
+ logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+ }
+ })
+ return nil
+}
+
+// Starts the pod's service container if it's not already running.
+func (p *Pod) maybeStartServiceContainer(ctx context.Context) error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+
+ serviceCtr.lock.Lock()
+ defer serviceCtr.lock.Unlock()
+
+ if err := serviceCtr.syncContainer(); err != nil {
+ return err
+ }
+
+ if serviceCtr.state.State == define.ContainerStateRunning {
+ return nil
+ }
+
+ // Restart will reinit among other things.
+ return serviceCtr.restartWithTimeout(ctx, 0)
+}
+
+// canRemoveServiceContainerLocked returns true if all pods of the service are removed.
+// Note that the method acquires the container lock.
+func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return false, err
+ }
+
+ if !c.IsService() {
+ return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+ }
+
+ for _, id := range c.state.Service.Pods {
+ if _, err := c.runtime.LookupPod(id); err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ continue
+ }
+ return false, err
+ }
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// Checks whether the service container can be removed and, if so, removes it.
+func (p *Pod) maybeRemoveServiceContainer() error {
+ if !p.hasServiceContainer() {
+ return nil
+ }
+
+ serviceCtr, err := p.serviceContainer()
+ if err != nil {
+ return fmt.Errorf("getting pod's service container: %w", err)
+ }
+ // Checking whether the service can be removed must be done in
+ // the runtime's work queue to resolve ABBA deadlocks in the
+ // pod->container->servicePods hierarchy.
+ p.runtime.queueWork(func() {
+ logrus.Debugf("Pod %s has a service %s: checking if it can be removed", p.ID(), serviceCtr.ID())
+ canRemove, err := serviceCtr.canRemoveServiceContainerLocked()
+ if err != nil {
+ logrus.Errorf("Checking whether service of container %s can be removed: %v", serviceCtr.ID(), err)
+ return
+ }
+ if !canRemove {
+ return
+ }
+ timeout := uint(0)
+ logrus.Debugf("Removing service container %s", serviceCtr.ID())
+ if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
+ logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+ }
+ })
+ return nil
+}
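
To make the stop condition in canStopServiceContainer concrete: the service container may only stop once every pod that references it has reached a terminal state (stopped, exited, or errored); a single pod that is still running keeps it alive. The sketch below is a self-contained illustration of that decision with stand-in types, not libpod's code.

package main

import "fmt"

type podStatus string

const (
	stateRunning podStatus = "Running"
	stateStopped podStatus = "Stopped"
	stateExited  podStatus = "Exited"
	stateErrored podStatus = "Error"
)

// canStopService mirrors the loop in canStopServiceContainer: any pod that is
// not yet in a terminal state keeps the service container running.
func canStopService(pods map[string]podStatus) bool {
	for _, status := range pods {
		switch status {
		case stateStopped, stateExited, stateErrored:
			continue
		default:
			return false
		}
	}
	return true
}

func main() {
	pods := map[string]podStatus{"web": stateExited, "db": stateRunning}
	fmt.Println(canStopService(pods)) // false: "db" is still running

	pods["db"] = stateStopped
	fmt.Println(canStopService(pods)) // true: every pod is done
}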