Diffstat (limited to 'libpod')
-rw-r--r--  libpod/container_api.go               2
-rw-r--r--  libpod/container_internal.go          8
-rw-r--r--  libpod/container_internal_linux.go   41
-rw-r--r--  libpod/container_log.go              14
-rw-r--r--  libpod/container_log_linux.go         9
-rw-r--r--  libpod/define/pod_inspect.go         12
-rw-r--r--  libpod/events/journal_linux.go       13
-rw-r--r--  libpod/events/logfile.go             26
-rw-r--r--  libpod/options.go                    12
-rw-r--r--  libpod/pod.go                       108
-rw-r--r--  libpod/pod_api.go                    72
-rw-r--r--  libpod/runtime_pod_linux.go         102
12 files changed, 195 insertions, 224 deletions
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 742eb6d3e..2ff4bfe08 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -555,7 +555,7 @@ func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration)
// The container never ran.
return true, 0, nil
}
- return true, -1, err
+ return true, -1, fmt.Errorf("%w (container in state %s)", err, c.state.State)
}
return true, exitCode, nil
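The hunk above wraps the raw error with the container's current state via %w, so callers can still unwrap and match the underlying error while the message gains debugging context. A minimal sketch of that pattern; errNoExitCode and getExitCode are hypothetical stand-ins, not part of the libpod API:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // errNoExitCode and getExitCode are illustrative placeholders only.
    var errNoExitCode = errors.New("no exit code found")

    func getExitCode() (int, error) { return -1, errNoExitCode }

    func main() {
    	code, err := getExitCode()
    	if err != nil {
    		// %w keeps the original error unwrappable while the message
    		// now carries the container state for easier debugging.
    		err = fmt.Errorf("%w (container in state %s)", err, "stopping")
    	}
    	fmt.Println(code, err)
    	fmt.Println(errors.Is(err, errNoExitCode)) // true
    }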
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 7e330430c..bad68991b 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -347,7 +347,7 @@ func (c *Container) syncContainer() error {
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStatePaused) {
+ if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStatePaused) {
oldState := c.state.State
if err := c.checkExitFile(); err != nil {
@@ -1316,10 +1316,10 @@ func (c *Container) stop(timeout uint) error {
// Since we're now subject to a race condition with other processes who
// may have altered the state (and other data), let's check if the
- // state has changed. If so, we should return immediately and log a
- // warning.
+ // state has changed. If so, we should return immediately and leave
+ // breadcrumbs for debugging if needed.
if c.state.State != define.ContainerStateStopping {
- logrus.Warnf(
+ logrus.Debugf(
"Container %q state changed from %q to %q while waiting for it to be stopped: discontinuing stop procedure as another process interfered",
c.ID(), define.ContainerStateStopping, c.state.State,
)
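For context, ensureState (not shown in this diff) is essentially a membership check of the current state against an allow-list; adding ContainerStateStopping to that list lets syncContainer refresh containers caught mid-stop. A rough sketch of such a check, using plain strings instead of libpod's state type:

    package main

    import "fmt"

    // ensureState-style membership check: is the current state one of the
    // allowed states? (Sketch only; libpod uses its own state constants.)
    func ensureState(current string, allowed ...string) bool {
    	for _, s := range allowed {
    		if current == s {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	fmt.Println(ensureState("stopping", "created", "running", "stopped", "stopping", "paused")) // true
    	fmt.Println(ensureState("exited", "created", "running"))                                    // false
    }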
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 46e525add..a131ab367 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -2886,23 +2886,6 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
}
- // Next, check if the container even has a /etc/passwd or /etc/group.
- // If it doesn't we don't want to create them ourselves.
- if needPasswd {
- exists, err := c.checkFileExistsInRootfs("/etc/passwd")
- if err != nil {
- return "", "", err
- }
- needPasswd = exists
- }
- if needGroup {
- exists, err := c.checkFileExistsInRootfs("/etc/group")
- if err != nil {
- return "", "", err
- }
- needGroup = exists
- }
-
// If we don't need a /etc/passwd or /etc/group at this point we can
// just return.
if !needPasswd && !needGroup {
@@ -2947,7 +2930,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
return "", "", fmt.Errorf("error looking up location of container %s /etc/passwd: %w", c.ID(), err)
}
- f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
+ f, err := os.OpenFile(containerPasswd, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
@@ -2993,7 +2976,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
return "", "", fmt.Errorf("error looking up location of container %s /etc/group: %w", c.ID(), err)
}
- f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
+ f, err := os.OpenFile(containerGroup, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
@@ -3112,26 +3095,6 @@ func (c *Container) cleanupOverlayMounts() error {
return overlay.CleanupContent(c.config.StaticDir)
}
-// Check if a file exists at the given path in the container's root filesystem.
-// Container must already be mounted for this to be used.
-func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
- checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file)
- if err != nil {
- return false, fmt.Errorf("cannot create path to container %s file %q: %w", c.ID(), file, err)
- }
- stat, err := os.Stat(checkPath)
- if err != nil {
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, fmt.Errorf("container %s: %w", c.ID(), err)
- }
- if stat.IsDir() {
- return false, nil
- }
- return true, nil
-}
-
// Creates and mounts an empty dir to mount secrets into, if it does not already exist
func (c *Container) createSecretMountDir() error {
src := filepath.Join(c.state.RunDir, "/run/secrets")
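The O_CREATE addition means the entry is appended to an existing /etc/passwd or /etc/group, or the file is created first when the image ships without one, which is why the separate checkFileExistsInRootfs pre-check could be dropped. A small sketch of that flag combination; the path is illustrative:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// O_CREATE|O_APPEND|O_WRONLY: create the file with mode 0600 if it
    	// does not exist, otherwise append to whatever is already there.
    	f, err := os.OpenFile("/tmp/example-passwd", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
    	if err != nil {
    		fmt.Println("open:", err)
    		return
    	}
    	defer f.Close()
    	if _, err := f.WriteString("user:x:1000:1000::/home/user:/bin/sh\n"); err != nil {
    		fmt.Println("write:", err)
    	}
    }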
diff --git a/libpod/container_log.go b/libpod/container_log.go
index a9e0fe065..c49b54eb1 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
+ "github.com/nxadm/tail"
"github.com/nxadm/tail/watch"
"github.com/sirupsen/logrus"
)
@@ -74,14 +75,19 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
go func() {
defer options.WaitGroup.Done()
-
- for line := range t.Lines {
+ var line *tail.Line
+ var ok bool
+ for {
select {
case <-ctx.Done():
// the consumer has cancelled
+ t.Kill(errors.New("hangup by client"))
return
- default:
- // fallthrough
+ case line, ok = <-t.Lines:
+ if !ok {
+ // channel was closed
+ return
+ }
}
nll, err := logs.NewLogLine(line.Text)
if err != nil {
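Ranging over t.Lines only noticed a cancelled context once the next line arrived; the rewritten loop selects on both the context and the tail channel, so the goroutine can kill the tailer and exit as soon as the client hangs up. A self-contained sketch of the same select pattern, with a plain string channel standing in for the tail library:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    func consume(ctx context.Context, lines <-chan string) {
    	for {
    		select {
    		case <-ctx.Done():
    			// the consumer has cancelled; stop immediately instead of
    			// waiting for another line to arrive
    			fmt.Println("cancelled")
    			return
    		case line, ok := <-lines:
    			if !ok {
    				// channel was closed by the producer
    				return
    			}
    			fmt.Println("got:", line)
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	lines := make(chan string)
    	go consume(ctx, lines)
    	lines <- "hello"
    	cancel()
    	time.Sleep(50 * time.Millisecond)
    }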
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 0686caed2..7e95f2449 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -178,8 +178,13 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
if !options.Follow || !containerCouldBeLogging {
return
}
- // Sleep until something's happening on the journal.
- journal.Wait(sdjournal.IndefiniteWait)
+
+ // journal.Wait() blocks; if no more journal entries are generated the
+ // goroutine would hang forever, leaking memory when the client has
+ // already closed the connection in the meantime.
+ // Waiting at most 5 seconds ensures that we re-check whether the client
+ // has closed the connection at least every 5 seconds.
+ journal.Wait(5 * time.Second)
continue
}
lastReadCursor = cursor
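Replacing the indefinite wait with a bounded one turns the follow loop into a periodic poll: each pass can re-check whether the client is still connected before blocking again. A rough sketch of that shape; waitForEntries is a hypothetical stand-in for journal.Wait, and the interval is shortened here for a quick run:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    const pollInterval = 500 * time.Millisecond // libpod uses 5 * time.Second

    // waitForEntries is a placeholder for journal.Wait: block for at most d
    // waiting for new journal data.
    func waitForEntries(d time.Duration) { time.Sleep(d) }

    func follow(ctx context.Context) {
    	for {
    		select {
    		case <-ctx.Done():
    			// client went away; with an indefinite wait we could block
    			// forever and leak this goroutine
    			return
    		default:
    		}
    		// Bounded wait: the ctx check above runs at least once per interval.
    		waitForEntries(pollInterval)
    		fmt.Println("checking for new entries")
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 1200*time.Millisecond)
    	defer cancel()
    	follow(ctx)
    }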
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 2afef48c4..d56074882 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -57,20 +57,32 @@ type InspectPodData struct {
CPUPeriod uint64 `json:"cpu_period,omitempty"`
// CPUQuota contains the CPU quota of the pod
CPUQuota int64 `json:"cpu_quota,omitempty"`
+ // CPUShares contains the cpu shares for the pod
+ CPUShares uint64 `json:"cpu_shares,omitempty"`
// CPUSetCPUs contains linux specific CPU data for the pod
CPUSetCPUs string `json:"cpuset_cpus,omitempty"`
+ // CPUSetMems contains linux specific memory node data for the pod
+ CPUSetMems string `json:"cpuset_mems,omitempty"`
// Mounts contains volume related information for the pod
Mounts []InspectMount `json:"mounts,omitempty"`
// Devices contains the specified host devices
Devices []InspectDevice `json:"devices,omitempty"`
// BlkioDeviceReadBps contains the Read/Access limit for the pod's devices
BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"device_read_bps,omitempty"`
+ // BlkioDeviceWriteBps contains the Write limit for the pod's devices
+ BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"device_write_bps,omitempty"`
// VolumesFrom contains the containers that the pod inherits mounts from
VolumesFrom []string `json:"volumes_from,omitempty"`
// SecurityOpt contains the specified security labels and related SELinux information
SecurityOpts []string `json:"security_opt,omitempty"`
// MemoryLimit contains the specified cgroup memory limit for the pod
MemoryLimit uint64 `json:"memory_limit,omitempty"`
+ // MemorySwap contains the specified memory swap limit for the pod
+ MemorySwap uint64 `json:"memory_swap,omitempty"`
+ // BlkioWeight contains the blkio weight limit for the pod
+ BlkioWeight uint64 `json:"blkio_weight,omitempty"`
+ // BlkioWeightDevice contains the blkio weight device limits for the pod
+ BlkioWeightDevice []InspectBlkioWeightDevice `json:"blkio_weight_device,omitempty"`
}
// InspectPodInfraConfig contains the configuration of the pod's infra
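All of the new fields carry omitempty, so limits that were never set simply disappear from the inspect JSON instead of showing up as zero values. A quick stdlib illustration of that behaviour; the struct is a cut-down stand-in, not the real InspectPodData:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // A cut-down stand-in for a few of the new InspectPodData fields.
    type podLimits struct {
    	CPUShares  uint64 `json:"cpu_shares,omitempty"`
    	MemorySwap uint64 `json:"memory_swap,omitempty"`
    	CPUSetMems string `json:"cpuset_mems,omitempty"`
    }

    func main() {
    	unset, _ := json.Marshal(podLimits{})
    	set, _ := json.Marshal(podLimits{CPUShares: 512, CPUSetMems: "0-1"})
    	fmt.Println(string(unset)) // {}
    	fmt.Println(string(set))   // {"cpu_shares":512,"cpuset_mems":"0-1"}
    }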
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 0a0a768d0..16ef6504f 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -141,9 +141,18 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) {
break
}
- t := sdjournal.IndefiniteWait
+
+ // j.Wait() blocks; if no more journal entries are generated the
+ // goroutine would hang forever, leaking memory when the client has
+ // already closed the connection in the meantime.
+ // Waiting at most 5 seconds ensures that we re-check whether the client
+ // has closed the connection at least every 5 seconds.
+ t := 5 * time.Second
if len(options.Until) > 0 {
- t = time.Until(untilTime)
+ until := time.Until(untilTime)
+ if until < t {
+ t = until
+ }
}
_ = j.Wait(t)
continue
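The new wait computation is effectively min(5 seconds, time remaining until --until): never sleep past the user's deadline, but also never sleep indefinitely. A tiny sketch of that clamp, with a zero time meaning "no --until given":

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitInterval clamps the default 5-second poll to the time remaining
    // until the caller's deadline (a zero untilTime means no deadline).
    func waitInterval(untilTime time.Time) time.Duration {
    	t := 5 * time.Second
    	if !untilTime.IsZero() {
    		if until := time.Until(untilTime); until < t {
    			t = until
    		}
    	}
    	return t
    }

    func main() {
    	fmt.Println(waitInterval(time.Time{}))                     // 5s
    	fmt.Println(waitInterval(time.Now().Add(2 * time.Second))) // ~2s
    }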
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 4dafd8600..c7dbf4850 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -108,23 +108,19 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
}
}()
}
- funcDone := make(chan bool)
- copy := true
- go func() {
- select {
- case <-funcDone:
- // Do nothing
- case <-ctx.Done():
- copy = false
- t.Kill(errors.New("hangup by client"))
- }
- }()
- for line := range t.Lines {
+ var line *tail.Line
+ var ok bool
+ for {
select {
case <-ctx.Done():
// the consumer has cancelled
+ t.Kill(errors.New("hangup by client"))
return nil
- default:
+ case line, ok = <-t.Lines:
+ if !ok {
+ // channel was closed
+ return nil
+ }
// fallthrough
}
@@ -138,12 +134,10 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
default:
return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
- if copy && applyFilters(event, filterMap) {
+ if applyFilters(event, filterMap) {
options.EventChannel <- event
}
}
- funcDone <- true
- return nil
}
// String returns a string representation of the logger
diff --git a/libpod/options.go b/libpod/options.go
index f03980017..b31cb4ab2 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -2145,6 +2145,18 @@ func WithServiceContainer(id string) PodCreateOption {
}
}
+// WithPodResources sets resource limits to be applied to the pod's cgroup;
+// these will be inherited by all containers unless overridden.
+func WithPodResources(resources specs.LinuxResources) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+ pod.config.ResourceLimits = resources
+ return nil
+ }
+}
+
// WithVolatile sets the volatile flag for the container storage.
// The option can potentially cause data loss when used on a container that must survive a machine reboot.
func WithVolatile() CtrCreateOption {
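WithPodResources follows libpod's functional-option shape: the option closes over the limits and refuses to mutate a pod that has already been finalized. A stripped-down sketch of that pattern with placeholder types (pod, podCreateOption and the string limits are illustrative, not the real libpod types):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // A stripped-down model of libpod's functional options, for illustration.
    type pod struct {
    	valid  bool
    	limits string // stand-in for specs.LinuxResources
    }

    type podCreateOption func(*pod) error

    var errPodFinalized = errors.New("pod has already been finalized")

    // withPodResources mirrors the shape of WithPodResources: reject a
    // finalized pod, otherwise record the limits in the pod config.
    func withPodResources(limits string) podCreateOption {
    	return func(p *pod) error {
    		if p.valid {
    			return errPodFinalized
    		}
    		p.limits = limits
    		return nil
    	}
    }

    func main() {
    	p := &pod{}
    	if err := withPodResources("cpu=2")(p); err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println("configured:", p.limits)
    }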
diff --git a/libpod/pod.go b/libpod/pod.go
index e059c9416..89f6d6cfe 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -83,6 +83,9 @@ type PodConfig struct {
// ID of the pod's lock
LockID uint32 `json:"lockID"`
+
+ // ResourceLimits holds the pod-level resource limits
+ ResourceLimits specs.LinuxResources
}
// podState represents a pod's state
@@ -116,18 +119,7 @@ func (p *Pod) ResourceLim() *specs.LinuxResources {
empty := &specs.LinuxResources{
CPU: &specs.LinuxCPU{},
}
- infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
- if err != nil {
- return empty
- }
- conf := infra.config.Spec
- if err != nil {
- return empty
- }
- if conf.Linux == nil || conf.Linux.Resources == nil {
- return empty
- }
- if err = JSONDeepCopy(conf.Linux.Resources, resCopy); err != nil {
+ if err := JSONDeepCopy(p.config.ResourceLimits, resCopy); err != nil {
return nil
}
if resCopy.CPU != nil {
@@ -139,51 +131,91 @@ func (p *Pod) ResourceLim() *specs.LinuxResources {
// CPUPeriod returns the pod CPU period
func (p *Pod) CPUPeriod() uint64 {
- if p.state.InfraContainerID == "" {
+ resLim := p.ResourceLim()
+ if resLim.CPU == nil || resLim.CPU.Period == nil {
return 0
}
- infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
- if err != nil {
+ return *resLim.CPU.Period
+}
+
+// CPUQuota returns the pod CPU quota
+func (p *Pod) CPUQuota() int64 {
+ resLim := p.ResourceLim()
+ if resLim.CPU == nil || resLim.CPU.Quota == nil {
return 0
}
- conf := infra.config.Spec
- if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.CPU != nil && conf.Linux.Resources.CPU.Period != nil {
- return *conf.Linux.Resources.CPU.Period
+ return *resLim.CPU.Quota
+}
+
+// MemoryLimit returns the pod Memory Limit
+func (p *Pod) MemoryLimit() uint64 {
+ resLim := p.ResourceLim()
+ if resLim.Memory == nil || resLim.Memory.Limit == nil {
+ return 0
}
- return 0
+ return uint64(*resLim.Memory.Limit)
}
-// CPUQuota returns the pod CPU quota
-func (p *Pod) CPUQuota() int64 {
- if p.state.InfraContainerID == "" {
+// MemorySwap returns the pod Memory swap limit
+func (p *Pod) MemorySwap() uint64 {
+ resLim := p.ResourceLim()
+ if resLim.Memory == nil || resLim.Memory.Swap == nil {
return 0
}
- infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
- if err != nil {
+ return uint64(*resLim.Memory.Swap)
+}
+
+// BlkioWeight returns the pod blkio weight
+func (p *Pod) BlkioWeight() uint64 {
+ resLim := p.ResourceLim()
+ if resLim.BlockIO == nil || resLim.BlockIO.Weight == nil {
return 0
}
- conf := infra.config.Spec
- if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.CPU != nil && conf.Linux.Resources.CPU.Quota != nil {
- return *conf.Linux.Resources.CPU.Quota
+ return uint64(*resLim.BlockIO.Weight)
+}
+
+// CPUSetMems returns the pod CPUSet memory nodes
+func (p *Pod) CPUSetMems() string {
+ resLim := p.ResourceLim()
+ if resLim.CPU == nil {
+ return ""
}
- return 0
+ return resLim.CPU.Mems
}
-// MemoryLimit returns the pod Memory Limit
-func (p *Pod) MemoryLimit() uint64 {
- if p.state.InfraContainerID == "" {
+// CPUShares returns the pod cpu shares
+func (p *Pod) CPUShares() uint64 {
+ resLim := p.ResourceLim()
+ if resLim.CPU == nil || resLim.CPU.Shares == nil {
return 0
}
- infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
+ return *resLim.CPU.Shares
+}
+
+// BlkiThrottleReadBps returns the pod's blkio read-bps throttle devices
+func (p *Pod) BlkiThrottleReadBps() []define.InspectBlkioThrottleDevice {
+ resLim := p.ResourceLim()
+ if resLim.BlockIO == nil || resLim.BlockIO.ThrottleReadBpsDevice == nil {
+ return []define.InspectBlkioThrottleDevice{}
+ }
+ devs, err := blkioDeviceThrottle(nil, resLim.BlockIO.ThrottleReadBpsDevice)
if err != nil {
- return 0
+ return []define.InspectBlkioThrottleDevice{}
}
- conf := infra.config.Spec
- if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.Memory != nil && conf.Linux.Resources.Memory.Limit != nil {
- val := *conf.Linux.Resources.Memory.Limit
- return uint64(val)
+ return devs
+}
+
+// BlkiThrottleWriteBps returns the pod's blkio write-bps throttle devices
+func (p *Pod) BlkiThrottleWriteBps() []define.InspectBlkioThrottleDevice {
+ resLim := p.ResourceLim()
+ if resLim.BlockIO == nil || resLim.BlockIO.ThrottleWriteBpsDevice == nil {
+ return []define.InspectBlkioThrottleDevice{}
+ }
+ devs, err := blkioDeviceThrottle(nil, resLim.BlockIO.ThrottleWriteBpsDevice)
+ if err != nil {
+ return []define.InspectBlkioThrottleDevice{}
}
- return 0
+ return devs
}
// NetworkMode returns the Network mode given by the user ex: pod, private...
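The new accessors all repeat the same shape: deep-copy the pod's stored limits, guard the pointer chain, then dereference with a zero fallback. A self-contained sketch of that nil-safe accessor pattern; the local types mirror the OCI spec structs and the derefOr helper is illustrative, not part of libpod:

    package main

    import "fmt"

    // Local mirrors of the OCI spec types, to keep the sketch self-contained;
    // libpod uses specs.LinuxResources / specs.LinuxCPU.
    type linuxCPU struct{ Period *uint64 }
    type linuxResources struct{ CPU *linuxCPU }

    // derefOr captures the pattern the accessors repeat by hand: guard the
    // pointer, then dereference with a fallback. Not part of libpod.
    func derefOr[T any](p *T, fallback T) T {
    	if p == nil {
    		return fallback
    	}
    	return *p
    }

    func cpuPeriod(res *linuxResources) uint64 {
    	if res == nil || res.CPU == nil {
    		return 0
    	}
    	return derefOr(res.CPU.Period, 0)
    }

    func main() {
    	period := uint64(100000)
    	fmt.Println(cpuPeriod(&linuxResources{CPU: &linuxCPU{Period: &period}})) // 100000
    	fmt.Println(cpuPeriod(nil))                                              // 0
    }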
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index c1d54d55e..29964ae95 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -659,7 +659,6 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
var infraConfig *define.InspectPodInfraConfig
var inspectMounts []define.InspectMount
var devices []define.InspectDevice
- var deviceLimits []define.InspectBlkioThrottleDevice
var infraSecurity []string
if p.state.InfraContainerID != "" {
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
@@ -683,18 +682,6 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
if err != nil {
return nil, err
}
- var nodes map[string]string
- devices, err = infra.GetDevices(false, *infra.config.Spec, nodes)
- if err != nil {
- return nil, err
- }
- spec := infra.config.Spec
- if spec.Linux != nil && spec.Linux.Resources != nil && spec.Linux.Resources.BlockIO != nil {
- deviceLimits, err = blkioDeviceThrottle(nodes, spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice)
- if err != nil {
- return nil, err
- }
- }
if len(infra.config.ContainerNetworkConfig.DNSServer) > 0 {
infraConfig.DNSServer = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSServer))
@@ -731,33 +718,38 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
}
inspectData := define.InspectPodData{
- ID: p.ID(),
- Name: p.Name(),
- Namespace: p.Namespace(),
- Created: p.CreatedTime(),
- CreateCommand: p.config.CreateCommand,
- ExitPolicy: string(p.config.ExitPolicy),
- State: podState,
- Hostname: p.config.Hostname,
- Labels: p.Labels(),
- CreateCgroup: p.config.UsePodCgroup,
- CgroupParent: p.CgroupParent(),
- CgroupPath: p.state.CgroupPath,
- CreateInfra: infraConfig != nil,
- InfraContainerID: p.state.InfraContainerID,
- InfraConfig: infraConfig,
- SharedNamespaces: sharesNS,
- NumContainers: uint(len(containers)),
- Containers: ctrs,
- CPUSetCPUs: p.ResourceLim().CPU.Cpus,
- CPUPeriod: p.CPUPeriod(),
- CPUQuota: p.CPUQuota(),
- MemoryLimit: p.MemoryLimit(),
- Mounts: inspectMounts,
- Devices: devices,
- BlkioDeviceReadBps: deviceLimits,
- VolumesFrom: p.VolumesFrom(),
- SecurityOpts: infraSecurity,
+ ID: p.ID(),
+ Name: p.Name(),
+ Namespace: p.Namespace(),
+ Created: p.CreatedTime(),
+ CreateCommand: p.config.CreateCommand,
+ ExitPolicy: string(p.config.ExitPolicy),
+ State: podState,
+ Hostname: p.config.Hostname,
+ Labels: p.Labels(),
+ CreateCgroup: p.config.UsePodCgroup,
+ CgroupParent: p.CgroupParent(),
+ CgroupPath: p.state.CgroupPath,
+ CreateInfra: infraConfig != nil,
+ InfraContainerID: p.state.InfraContainerID,
+ InfraConfig: infraConfig,
+ SharedNamespaces: sharesNS,
+ NumContainers: uint(len(containers)),
+ Containers: ctrs,
+ CPUSetCPUs: p.ResourceLim().CPU.Cpus,
+ CPUPeriod: p.CPUPeriod(),
+ CPUQuota: p.CPUQuota(),
+ MemoryLimit: p.MemoryLimit(),
+ Mounts: inspectMounts,
+ Devices: devices,
+ BlkioDeviceReadBps: p.BlkiThrottleReadBps(),
+ VolumesFrom: p.VolumesFrom(),
+ SecurityOpts: infraSecurity,
+ MemorySwap: p.MemorySwap(),
+ BlkioWeight: p.BlkioWeight(),
+ CPUSetMems: p.CPUSetMems(),
+ BlkioDeviceWriteBps: p.BlkiThrottleWriteBps(),
+ CPUShares: p.CPUShares(),
}
return &inspectData, nil
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 75ff24e41..57c0b5c48 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
- "os"
"path"
"path/filepath"
"strings"
@@ -18,7 +17,6 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
- runcconfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
)
@@ -80,7 +78,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
// cgroupfs + rootless = permission denied when creating the cgroup.
if !rootless.IsRootless() {
- res, err := GetLimits(p.InfraContainerSpec.ResourceLimits)
+ res, err := GetLimits(p.ResourceLimits)
if err != nil {
return nil, err
}
@@ -113,7 +111,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
if pod.config.UsePodCgroup {
- cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.InfraContainerSpec.ResourceLimits)
+ cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.ResourceLimits)
if err != nil {
return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
}
@@ -214,83 +212,37 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}
- // Go through and lock all containers so we can operate on them all at
- // once.
- // First loop also checks that we are ready to go ahead and remove.
- containersLocked := true
+ ctrNamedVolumes := make(map[string]*ContainerNamedVolume)
+
+ var removalErr error
for _, ctr := range ctrs {
- ctrLock := ctr.lock
- ctrLock.Lock()
- defer func() {
- if containersLocked {
+ err := func() error {
+ ctrLock := ctr.lock
+ ctrLock.Lock()
+ defer func() {
ctrLock.Unlock()
- }
- }()
+ }()
- // If we're force-removing, no need to check status.
- if force {
- continue
- }
-
- // Sync all containers
- if err := ctr.syncContainer(); err != nil {
- return err
- }
-
- // Ensure state appropriate for removal
- if err := ctr.checkReadyForRemoval(); err != nil {
- return fmt.Errorf("pod %s has containers that are not ready to be removed: %w", p.ID(), err)
- }
- }
-
- // We're going to be removing containers.
- // If we are Cgroupfs cgroup driver, to avoid races, we need to hit
- // the pod and conmon Cgroups with a PID limit to prevent them from
- // spawning any further processes (particularly cleanup processes) which
- // would prevent removing the Cgroups.
- if p.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager {
- // Get the conmon Cgroup
- conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
- conmonCgroup, err := cgroups.Load(conmonCgroupPath)
- if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
- logrus.Errorf("Retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
- }
-
- // New resource limits
- resLimits := new(runcconfig.Resources)
- resLimits.PidsLimit = 1 // Inhibit forks with very low pids limit
-
- // Don't try if we failed to retrieve the cgroup
- if err == nil {
- if err := conmonCgroup.Update(resLimits); err != nil && !os.IsNotExist(err) {
- logrus.Warnf("Error updating pod %s conmon cgroup PID limit: %v", p.ID(), err)
+ if err := ctr.syncContainer(); err != nil {
+ return err
}
- }
- }
-
- var removalErr error
- ctrNamedVolumes := make(map[string]*ContainerNamedVolume)
+ for _, vol := range ctr.config.NamedVolumes {
+ ctrNamedVolumes[vol.Name] = vol
+ }
- // Second loop - all containers are good, so we should be clear to
- // remove.
- for _, ctr := range ctrs {
- // Remove the container.
- // Do NOT remove named volumes. Instead, we're going to build a
- // list of them to be removed at the end, once the containers
- // have been removed by RemovePodContainers.
- for _, vol := range ctr.config.NamedVolumes {
- ctrNamedVolumes[vol.Name] = vol
- }
+ return r.removeContainer(ctx, ctr, force, false, true, timeout)
+ }()
- if err := r.removeContainer(ctx, ctr, force, false, true, timeout); err != nil {
- if removalErr == nil {
- removalErr = err
- } else {
- logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
- }
+ if removalErr == nil {
+ removalErr = err
+ } else {
+ logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
}
}
+ if removalErr != nil {
+ return removalErr
+ }
// Clear infra container ID before we remove the infra container.
// There is a potential issue if we don't do that, and removal is
@@ -326,12 +278,6 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
- // let's unlock the containers so if there is any cleanup process, it can terminate its execution
- for _, ctr := range ctrs {
- ctr.lock.Unlock()
- }
- containersLocked = false
-
// Remove pod cgroup, if present
if p.state.CgroupPath != "" {
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)