Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/container.go | 8
-rw-r--r-- | libpod/container_config.go | 3
-rw-r--r-- | libpod/container_inspect.go | 1
-rw-r--r-- | libpod/container_validate.go | 8
-rw-r--r-- | libpod/define/container_inspect.go | 1
-rw-r--r-- | libpod/options.go | 38
-rw-r--r-- | libpod/pod.go | 7
-rw-r--r-- | libpod/pod_api.go | 22
-rw-r--r-- | libpod/runtime_img.go | 2
-rw-r--r-- | libpod/runtime_pod_linux.go | 4
-rw-r--r-- | libpod/service.go | 213
11 files changed, 305 insertions, 2 deletions
diff --git a/libpod/container.go b/libpod/container.go
index d7af9a100..64b4453fb 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -211,6 +211,14 @@ type ContainerState struct {
     // network and an interface names
     NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`

+    // Service indicates that container is the service container of a
+    // service. A service consists of one or more pods. The service
+    // container is started before all pods and is stopped when the last
+    // pod stops. The service container allows for tracking and managing
+    // the entire life cycle of service which may be started via
+    // `podman-play-kube`.
+    Service Service
+
     // containerPlatformState holds platform-specific container state.
     containerPlatformState

diff --git a/libpod/container_config.go b/libpod/container_config.go
index 371a1dec0..3e85ad4d5 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -382,6 +382,9 @@ type ContainerMiscConfig struct {
     // IsInfra is a bool indicating whether this container is an infra container used for
     // sharing kernel namespaces in a pod
     IsInfra bool `json:"pause"`
+    // IsService is a bool indicating whether this container is a service container used for
+    // tracking the life cycle of K8s service.
+    IsService bool `json:"isService"`
     // SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
     SdNotifyMode string `json:"sdnotifyMode,omitempty"`
     // Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false

diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 76e0e9e13..5d809644d 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -171,6 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
         Mounts: inspectMounts,
         Dependencies: c.Dependencies(),
         IsInfra: c.IsInfra(),
+        IsService: c.isService(),
     }

     if c.state.ConfigPath != "" {

diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index c6c9a4c6d..d939c94e6 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -1,6 +1,8 @@
 package libpod

 import (
+    "fmt"
+
     "github.com/containers/podman/v4/libpod/define"
     spec "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/pkg/errors"
@@ -27,6 +29,12 @@ func (c *Container) validate() error {
         return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
     }

+    // A container cannot be marked as an infra and service container at
+    // the same time.
+    if c.IsInfra() && c.isService() {
+        return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
+    }
+
     // Cannot make a network namespace if we are joining another container's
     // network namespace
     if c.config.CreateNetNS && c.config.NetNsCtr != "" {
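For reference, the validation hunk above wraps define.ErrInvalidArg with fmt.Errorf and the %w verb, so callers can still match the sentinel with errors.Is even though the message is built with fmt rather than errors.Wrapf. A minimal, self-contained sketch of that pattern follows; errInvalidArg and validate are local stand-ins, not libpod symbols.

package main

import (
    "errors"
    "fmt"
)

// errInvalidArg stands in for libpod's define.ErrInvalidArg sentinel.
var errInvalidArg = errors.New("invalid argument")

// validate mimics the new check: a container may not be both an
// infra container and a service container.
func validate(isInfra, isService bool) error {
    if isInfra && isService {
        // %w wraps the sentinel so errors.Is can unwrap it later.
        return fmt.Errorf("cannot be infra and service container at the same time: %w", errInvalidArg)
    }
    return nil
}

func main() {
    err := validate(true, true)
    fmt.Println(err)                           // the full, wrapped message
    fmt.Println(errors.Is(err, errInvalidArg)) // true: the sentinel is preserved
}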
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 6cdffb8b7..e7b82d654 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -683,6 +683,7 @@ type InspectContainerData struct {
     NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"`
     Namespace string `json:"Namespace"`
     IsInfra bool `json:"IsInfra"`
+    IsService bool `json:"IsService"`
     Config *InspectContainerConfig `json:"Config"`
     HostConfig *InspectContainerHostConfig `json:"HostConfig"`
 }

diff --git a/libpod/options.go b/libpod/options.go
index 9b83cb76a..feb89510f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1,6 +1,7 @@
 package libpod

 import (
+    "fmt"
     "net"
     "os"
     "path/filepath"
@@ -1477,7 +1478,7 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
     }
 }

-// withIsInfra allows us to dfferentiate between infra containers and regular containers
+// withIsInfra allows us to dfferentiate between infra containers and other containers
 // within the container config
 func withIsInfra() CtrCreateOption {
     return func(ctr *Container) error {
@@ -1491,6 +1492,20 @@ func withIsInfra() CtrCreateOption {
     }
 }

+// WithIsService allows us to dfferentiate between service containers and other container
+// within the container config
+func WithIsService() CtrCreateOption {
+    return func(ctr *Container) error {
+        if ctr.valid {
+            return define.ErrCtrFinalized
+        }
+
+        ctr.config.IsService = true
+
+        return nil
+    }
+}
+
 // WithCreateWorkingDir tells Podman to create the container's working directory
 // if it does not exist.
 func WithCreateWorkingDir() CtrCreateOption {
@@ -2081,6 +2096,27 @@ func WithInfraContainer() PodCreateOption {
     }
 }

+// WithServiceContainer associates the specified service container ID with the pod.
+func WithServiceContainer(id string) PodCreateOption {
+    return func(pod *Pod) error {
+        if pod.valid {
+            return define.ErrPodFinalized
+        }
+
+        ctr, err := pod.runtime.LookupContainer(id)
+        if err != nil {
+            return fmt.Errorf("looking up service container: %w", err)
+        }
+
+        if err := ctr.addServicePodLocked(pod.ID()); err != nil {
+            return fmt.Errorf("associating service container %s with pod %s: %w", id, pod.ID(), err)
+        }
+
+        pod.config.ServiceContainerID = id
+        return nil
+    }
+}
+
 // WithVolatile sets the volatile flag for the container storage.
 // The option can potentially cause data loss when used on a container that must survive a machine reboot.
 func WithVolatile() CtrCreateOption {

diff --git a/libpod/pod.go b/libpod/pod.go
index 2211d5be7..3c8dc43d4 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -64,6 +64,13 @@ type PodConfig struct {
     HasInfra bool `json:"hasInfra,omitempty"`

+    // ServiceContainerID is the main container of a service. A service
+    // consists of one or more pods. The service container is started
+    // before all pods and is stopped when the last pod stops.
+    // The service container allows for tracking and managing the entire
+    // life cycle of service which may be started via `podman-play-kube`.
+    ServiceContainerID string `json:"serviceContainerID,omitempty"`
+
     // Time pod was created
     CreatedTime time.Time `json:"created"`
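Both new options follow libpod's functional-options style: a closure that refuses to mutate an already-finalized object and otherwise records the setting on the config. Below is a self-contained sketch of that pattern using only the standard library; pod, podCreateOption, and errPodFinalized are stand-ins, not the libpod types.

package main

import (
    "errors"
    "fmt"
)

var errPodFinalized = errors.New("pod has already been finalized")

// pod is a stripped-down stand-in for libpod's Pod.
type pod struct {
    valid              bool   // set once the pod is finalized
    serviceContainerID string // mirrors PodConfig.ServiceContainerID
}

// podCreateOption mirrors the shape of libpod's PodCreateOption.
type podCreateOption func(*pod) error

// withServiceContainer records the service container ID, refusing to
// touch a pod that has already been finalized.
func withServiceContainer(id string) podCreateOption {
    return func(p *pod) error {
        if p.valid {
            return errPodFinalized
        }
        p.serviceContainerID = id
        return nil
    }
}

// newPod applies each option and then finalizes the pod.
func newPod(opts ...podCreateOption) (*pod, error) {
    p := &pod{}
    for _, opt := range opts {
        if err := opt(p); err != nil {
            return nil, err
        }
    }
    p.valid = true // finalize: later options must be rejected
    return p, nil
}

func main() {
    p, err := newPod(withServiceContainer("abc123"))
    if err != nil {
        panic(err)
    }
    fmt.Println(p.serviceContainerID)             // abc123
    fmt.Println(withServiceContainer("other")(p)) // pod has already been finalized
}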
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 73b28822b..eede896a9 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -75,6 +75,10 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
         return nil, define.ErrPodRemoved
     }

+    if err := p.maybeStartServiceContainer(ctx); err != nil {
+        return nil, err
+    }
+
     // Before "regular" containers start in the pod, all init containers
     // must have run and exited successfully.
     if err := p.startInitContainers(ctx); err != nil {
@@ -197,6 +201,11 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
     if len(ctrErrors) > 0 {
         return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
     }
+
+    if err := p.maybeStopServiceContainer(); err != nil {
+        return nil, err
+    }
+
     return nil, nil
 }

@@ -297,6 +306,10 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
         return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
     }

+    if err := p.maybeStopServiceContainer(); err != nil {
+        return nil, err
+    }
+
     return nil, nil
 }

@@ -443,6 +456,10 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
         return nil, define.ErrPodRemoved
     }

+    if err := p.maybeStartServiceContainer(ctx); err != nil {
+        return nil, err
+    }
+
     allCtrs, err := p.runtime.state.PodContainers(p)
     if err != nil {
         return nil, err
@@ -530,6 +547,11 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
     if len(ctrErrors) > 0 {
         return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
     }
+
+    if err := p.maybeStopServiceContainer(); err != nil {
+        return nil, err
+    }
+
     return nil, nil
 }

diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 54eadf6b8..b13482722 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -40,7 +40,7 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
     if ctr.config.IsInfra {
         pod, err := r.state.Pod(ctr.config.Pod)
         if err != nil {
-            return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID())
+            return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
         }
         if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
             return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())

diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 62ec7df60..dcc3a044f 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -380,6 +380,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
         }
     }

+    if err := p.maybeRemoveServiceContainer(); err != nil {
+        return err
+    }
+
     // Remove pod from state
     if err := r.state.RemovePod(p); err != nil {
         if removalErr != nil {
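The runtime_img.go hunk is a small hardening fix: when r.state.Pod(ctr.config.Pod) fails, the old message formatted pod.ID(), a method call on the result of the very lookup that just failed (likely nil), while the new message reuses the ID already stored in the container config. A self-contained sketch of the difference, with stand-in types rather than libpod's:

package main

import (
    "errors"
    "fmt"
)

// pod is a stand-in for libpod's Pod; ID dereferences the receiver, so
// calling it on a nil *pod would panic.
type pod struct{ id string }

func (p *pod) ID() string { return p.id }

// lookupPod stands in for a failing r.state.Pod(...) call.
func lookupPod(id string) (*pod, error) {
    return nil, errors.New("no such pod")
}

func main() {
    wantedID := "f00d"
    p, err := lookupPod(wantedID)
    if err != nil {
        // The old message formatted p.ID() here, i.e. a method call on
        // the possibly-nil result of the failed lookup:
        //   fmt.Printf("container c1 is in pod %s, but pod cannot be retrieved\n", p.ID())
        // The patch formats the ID we already know instead:
        fmt.Printf("container c1 is in pod %s, but pod cannot be retrieved: %v\n", wantedID, err)
    }
    _ = p
}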
diff --git a/libpod/service.go b/libpod/service.go
new file mode 100644
index 000000000..ad147e87b
--- /dev/null
+++ b/libpod/service.go
@@ -0,0 +1,213 @@
+package libpod
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/containers/podman/v4/libpod/define"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+// A service consists of one or more pods. The service container is started
+// before all pods and is stopped when the last pod stops. The service
+// container allows for tracking and managing the entire life cycle of service
+// which may be started via `podman-play-kube`.
+type Service struct {
+    // Pods running as part of the service.
+    Pods []string `json:"servicePods"`
+}
+
+// Indicates whether the pod is associated with a service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) hasServiceContainer() bool {
+    return p.config.ServiceContainerID != ""
+}
+
+// Returns the pod's service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) serviceContainer() (*Container, error) {
+    id := p.config.ServiceContainerID
+    if id == "" {
+        return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+    }
+    return p.runtime.state.Container(id)
+}
+
+// ServiceContainer returns the service container.
+func (p *Pod) ServiceContainer() (*Container, error) {
+    p.lock.Lock()
+    defer p.lock.Unlock()
+    if err := p.updatePod(); err != nil {
+        return nil, err
+    }
+    return p.serviceContainer()
+}
+
+func (c *Container) addServicePodLocked(id string) error {
+    c.lock.Lock()
+    defer c.lock.Unlock()
+    if err := c.syncContainer(); err != nil {
+        return err
+    }
+    c.state.Service.Pods = append(c.state.Service.Pods, id)
+    return c.save()
+}
+
+func (c *Container) isService() bool {
+    return c.config.IsService
+}
+
+// canStopServiceContainer returns true if all pods of the service are stopped.
+// Note that the method acquires the container lock.
+func (c *Container) canStopServiceContainerLocked() (bool, error) {
+    c.lock.Lock()
+    defer c.lock.Unlock()
+    if err := c.syncContainer(); err != nil {
+        return false, err
+    }
+
+    if !c.isService() {
+        return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+    }
+
+    for _, id := range c.state.Service.Pods {
+        pod, err := c.runtime.LookupPod(id)
+        if err != nil {
+            if errors.Is(err, define.ErrNoSuchPod) {
+                continue
+            }
+            return false, err
+        }
+
+        status, err := pod.GetPodStatus()
+        if err != nil {
+            return false, err
+        }
+
+        // We can only stop the service if all pods are done.
+        switch status {
+        case define.PodStateStopped, define.PodStateExited, define.PodStateErrored:
+            continue
+        default:
+            return false, nil
+        }
+    }
+
+    return true, nil
+}
+
+// Checks whether the service container can be stopped and does so.
+func (p *Pod) maybeStopServiceContainer() error {
+    if !p.hasServiceContainer() {
+        return nil
+    }
+
+    serviceCtr, err := p.serviceContainer()
+    if err != nil {
+        return fmt.Errorf("getting pod's service container: %w", err)
+    }
+    // Checking whether the service can be stopped must be done in
+    // the runtime's work queue to resolve ABBA dead locks in the
+    // pod->container->servicePods hierarchy.
+    p.runtime.queueWork(func() {
+        logrus.Debugf("Pod %s has a service %s: checking if it can be stopped", p.ID(), serviceCtr.ID())
+        canStop, err := serviceCtr.canStopServiceContainerLocked()
+        if err != nil {
+            logrus.Errorf("Checking whether service of container %s can be stopped: %v", serviceCtr.ID(), err)
+            return
+        }
+        if !canStop {
+            return
+        }
+        logrus.Debugf("Stopping service container %s", serviceCtr.ID())
+        if err := serviceCtr.Stop(); err != nil {
+            logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+        }
+    })
+    return nil
+}
+
+// Starts the pod's service container if it's not already running.
+func (p *Pod) maybeStartServiceContainer(ctx context.Context) error {
+    if !p.hasServiceContainer() {
+        return nil
+    }
+
+    serviceCtr, err := p.serviceContainer()
+    if err != nil {
+        return fmt.Errorf("getting pod's service container: %w", err)
+    }
+
+    serviceCtr.lock.Lock()
+    defer serviceCtr.lock.Unlock()
+
+    if err := serviceCtr.syncContainer(); err != nil {
+        return err
+    }
+
+    if serviceCtr.state.State == define.ContainerStateRunning {
+        return nil
+    }
+
+    // Restart will reinit among other things.
+    return serviceCtr.restartWithTimeout(ctx, 0)
+}
+
+// canRemoveServiceContainer returns true if all pods of the service are removed.
+// Note that the method acquires the container lock.
+func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
+    c.lock.Lock()
+    defer c.lock.Unlock()
+    if err := c.syncContainer(); err != nil {
+        return false, err
+    }
+
+    if !c.isService() {
+        return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+    }
+
+    for _, id := range c.state.Service.Pods {
+        if _, err := c.runtime.LookupPod(id); err != nil {
+            if errors.Is(err, define.ErrNoSuchPod) {
+                continue
+            }
+            return false, err
+        }
+        return false, nil
+    }
+
+    return true, nil
+}
+
+// Checks whether the service container can be removed and does so.
+func (p *Pod) maybeRemoveServiceContainer() error {
+    if !p.hasServiceContainer() {
+        return nil
+    }
+
+    serviceCtr, err := p.serviceContainer()
+    if err != nil {
+        return fmt.Errorf("getting pod's service container: %w", err)
+    }
+    // Checking whether the service can be stopped must be done in
+    // the runtime's work queue to resolve ABBA dead locks in the
+    // pod->container->servicePods hierarchy.
+    p.runtime.queueWork(func() {
+        logrus.Debugf("Pod %s has a service %s: checking if it can be removed", p.ID(), serviceCtr.ID())
+        canRemove, err := serviceCtr.canRemoveServiceContainerLocked()
+        if err != nil {
+            logrus.Errorf("Checking whether service of container %s can be removed: %v", serviceCtr.ID(), err)
+            return
+        }
+        if !canRemove {
+            return
+        }
+        timeout := uint(0)
+        logrus.Debugf("Removing service container %s", serviceCtr.ID())
+        if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
+            logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+        }
+    })
+    return nil
+}
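Both maybeStopServiceContainer and maybeRemoveServiceContainer hand the actual check to p.runtime.queueWork, so the service container's lock is never taken while the pod lock is still held; that is how the ABBA deadlock in the pod->container->servicePods hierarchy is avoided. Below is a minimal, self-contained sketch of such a serialized work queue; the runtime type and queueWork here are stand-ins, not libpod's implementation.

package main

import (
    "fmt"
    "sync"
)

// runtime is a stand-in that serializes deferred work on one goroutine,
// so queued callbacks never run while the caller still holds its locks.
type runtime struct {
    workQueue chan func()
    wg        sync.WaitGroup
}

func newRuntime() *runtime {
    r := &runtime{workQueue: make(chan func(), 16)}
    r.wg.Add(1)
    go func() {
        defer r.wg.Done()
        for job := range r.workQueue {
            job() // jobs run one at a time, outside the caller's locks
        }
    }()
    return r
}

// queueWork hands a callback to the worker goroutine and returns
// immediately; the caller can release its own locks before the job runs.
func (r *runtime) queueWork(job func()) {
    r.workQueue <- job
}

// shutdown drains the queue and waits for the worker to finish.
func (r *runtime) shutdown() {
    close(r.workQueue)
    r.wg.Wait()
}

func main() {
    r := newRuntime()
    var podLock sync.Mutex

    podLock.Lock()
    // While holding the pod lock, only *queue* the service-container
    // check; taking the container lock here could deadlock if another
    // goroutine holds it and is waiting for the pod lock.
    r.queueWork(func() {
        fmt.Println("checking whether the service container can be stopped")
    })
    podLock.Unlock()

    r.shutdown()
}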