Diffstat (limited to 'libpod')
-rw-r--r--   libpod/container_exec.go             56
-rw-r--r--   libpod/container_internal.go         23
-rw-r--r--   libpod/container_internal_linux.go   56
-rw-r--r--   libpod/define/containerstate.go       1
-rw-r--r--   libpod/networking_linux.go           11
-rw-r--r--   libpod/networking_slirp4netns.go    147
-rw-r--r--   libpod/oci_conmon_linux.go            8
-rw-r--r--   libpod/pod.go                         9
-rw-r--r--   libpod/runtime_pod_linux.go          74
-rw-r--r--   libpod/stats.go                      22
-rw-r--r--   libpod/volume.go                     12
11 files changed, 266 insertions, 153 deletions
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index d1c190905..140267f28 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -341,22 +341,60 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
}
lastErr = tmpErr
- exitCode, err := c.readExecExitCode(session.ID())
- if err != nil {
+ exitCode, exitCodeErr := c.readExecExitCode(session.ID())
+
+ // Lock again.
+ // Important: we must lock and sync *before* the above error is handled.
+ // We need info from the database to handle the error.
+ if !c.batched {
+ c.lock.Lock()
+ }
+ // We can't reuse the old exec session (things may have changed from
+ // other use while the container was unlocked), so re-sync and get a
+ // fresh copy. If we can't do this, there is no point in continuing:
+ // any attempt to save would write garbage to the DB.
+ if err := c.syncContainer(); err != nil {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+ // We can't save status, but since the container has
+ // been entirely removed, we don't have to; exit cleanly
+ return lastErr
+ }
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
- lastErr = err
- }
+ return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID)
+ }
+
+ // Now handle the error from readExecExitCode above.
+ if exitCodeErr != nil {
+ newSess, ok := c.state.ExecSessions[sessionID]
+ if !ok {
+ // The exec session was removed entirely, probably by
+ // the cleanup process. When it did so, it should have
+ // written an event with the exit code.
+ // Given that, there's nothing more we can do.
+ logrus.Infof("Container %s exec session %s already removed", c.ID(), session.ID())
+ return lastErr
+ }
- logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+ if newSess.State == define.ExecStateStopped {
+ // Exec session already cleaned up.
+ // Exit code should be recorded, so it's OK if we were
+ // not able to read it.
+ logrus.Infof("Container %s exec session %s already cleaned up", c.ID(), session.ID())
+ return lastErr
+ }
- // Lock again
- if !c.batched {
- c.lock.Lock()
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = exitCodeErr
}
- if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
+ logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+
+ if err := justWriteExecExitCode(c, session.ID(), exitCode); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 51533b3bf..c2642f872 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -99,15 +99,8 @@ func (c *Container) rootFsSize() (int64, error) {
// rwSize gets the size of the mutable top layer of the container.
func (c *Container) rwSize() (int64, error) {
if c.config.Rootfs != "" {
- var size int64
- err := filepath.Walk(c.config.Rootfs, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- size += info.Size()
- return nil
- })
- return size, err
+ size, err := util.SizeOfPath(c.config.Rootfs)
+ return int64(size), err
}
container, err := c.runtime.store.Container(c.ID())
@@ -1087,13 +1080,6 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
// With the spec complete, do an OCI create
if _, err = c.ociRuntime.CreateContainer(c, nil); err != nil {
- // Fedora 31 is carrying a patch to display improved error
- // messages to better handle the V2 transition. This is NOT
- // upstream in any OCI runtime.
- // TODO: Remove once runc supports cgroupsv2
- if strings.Contains(err.Error(), "this version of runc doesn't work on cgroups v2") {
- logrus.Errorf("Oci runtime %q does not support Cgroups V2: use system migrate to mitigate", c.ociRuntime.Name())
- }
return err
}
@@ -1268,7 +1254,10 @@ func (c *Container) start() error {
}
}
- if c.config.HealthCheckConfig != nil {
+ // Check if the healthcheck is not nil and the --no-healthcheck option is not set.
+ // If --no-healthcheck is set, Test will always be `[NONE]`, so there is no need
+ // to update the status in that case.
+ if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") {
if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil {
logrus.Error(err)
}
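
Editor's note: both rwSize here and Volume.Size in volume.go (further down) now delegate to util.SizeOfPath. A plausible shape for such a helper, sketched with filepath.WalkDir — the real pkg/util implementation may differ in details such as symlink or special-file handling:

package util

import (
	"io/fs"
	"path/filepath"
)

// SizeOfPath walks the tree rooted at path and sums regular-file sizes.
// Sketch only; assumptions noted above.
func SizeOfPath(path string) (uint64, error) {
	var size uint64
	err := filepath.WalkDir(path, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			size += uint64(info.Size())
		}
		return nil
	})
	return size, err
}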
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index afa351c17..f7c8c190f 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -173,6 +173,57 @@ func (c *Container) prepare() error {
return nil
}
+// isWorkDirSymlink returns true if the resolved workdir is a symlink or a chain
+// of symlinks, and the final resolved target is present either on a volume, on a
+// mount, or inside the container; otherwise it returns false. This function is
+// meant for internal use only and can change at any point in time.
+func (c *Container) isWorkDirSymlink(resolvedPath string) bool {
+ // We cannot create the workdir since an explicit --workdir is
+ // set in the config, but the workdir could also be a symlink.
+ // If it's a symlink, check whether the resolved link is present
+ // in the container or not.
+
+ // If we can resolve the symlink and the resolved link is present in the
+ // container, return true because it's a valid use-case.
+
+ maxSymLinks := 0
+ for {
+ // Linux only supports a chain of 40 links.
+ // Reference: https://github.com/torvalds/linux/blob/master/include/linux/namei.h#L13
+ if maxSymLinks > 40 {
+ break
+ }
+ resolvedSymlink, err := os.Readlink(resolvedPath)
+ if err != nil {
+ // End sym-link resolution loop.
+ break
+ }
+ if resolvedSymlink != "" {
+ _, resolvedSymlinkWorkdir, err := c.resolvePath(c.state.Mountpoint, resolvedSymlink)
+ if isPathOnVolume(c, resolvedSymlinkWorkdir) || isPathOnBindMount(c, resolvedSymlinkWorkdir) {
+ // Resolved symlink exists on external volume or mount
+ return true
+ }
+ if err != nil {
+ // Could not resolve path so end sym-link resolution loop.
+ break
+ }
+ if resolvedSymlinkWorkdir != "" {
+ resolvedPath = resolvedSymlinkWorkdir
+ _, err := os.Stat(resolvedSymlinkWorkdir)
+ if err == nil {
+ // Symlink resolved successfully and the resolved path exists in the
+ // container; this is a valid use-case, so return true.
+ logrus.Debugf("Workdir is a symlink with target to %q and resolved symlink exists on container", resolvedSymlink)
+ return true
+ }
+ }
+ }
+ maxSymLinks++
+ }
+ return false
+}
+
// resolveWorkDir resolves the container's workdir and, depending on the
// configuration, will create it, or error out if it does not exist.
// Note that the container must be mounted before.
@@ -205,6 +256,11 @@ func (c *Container) resolveWorkDir() error {
// the path exists on the container.
if err != nil {
if os.IsNotExist(err) {
+ // If the resolved workdir path is marked as a valid symlink,
+ // return nil because this is a valid use-case.
+ if c.isWorkDirSymlink(resolvedWorkdir) {
+ return nil
+ }
return errors.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
}
// This might be a serious error (e.g., permission), so
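
Editor's note: the new isWorkDirSymlink caps resolution at Linux's 40-link limit. A self-contained sketch of bounded symlink chasing, leaving out the container-mountpoint and volume/bind-mount checks (and relative-target handling) the real code performs:

package main

import (
	"fmt"
	"os"
)

// followBounded follows a chain of symlinks, stopping at the same 40-link
// limit Linux enforces (MAXSYMLINKS in include/linux/namei.h).
func followBounded(path string) (string, error) {
	for i := 0; i < 40; i++ {
		target, err := os.Readlink(path)
		if err != nil {
			return path, nil // not a symlink (or unreadable): chain ends here
		}
		if _, err := os.Stat(target); err == nil {
			return target, nil // target exists: the workdir is usable
		}
		path = target // keep walking the chain
	}
	return "", fmt.Errorf("too many levels of symbolic links")
}

func main() {
	fmt.Println(followBounded("/proc/self/cwd")) // e.g. the current directory
}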
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 23ba1f451..9ad3aec08 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -138,7 +138,6 @@ type ContainerStats struct {
CPU float64
CPUNano uint64
CPUSystemNano uint64
- DataPoints int64
SystemNano uint64
MemUsage uint64
MemLimit uint64
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 29b9941fe..eb264379f 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -496,10 +496,13 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
return nil, err
}
- // move to systemd scope to prevent systemd from killing it
- err = utils.MoveRootlessNetnsSlirpProcessToUserSlice(cmd.Process.Pid)
- if err != nil {
- logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err)
+ if utils.RunsOnSystemd() {
+ // move to systemd scope to prevent systemd from killing it
+ err = utils.MoveRootlessNetnsSlirpProcessToUserSlice(cmd.Process.Pid)
+ if err != nil {
+ // only log this, it is not fatal but can lead to issues when running podman inside systemd units
+ logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err)
+ }
}
// build a new resolv.conf file which uses the slirp4netns dns server address
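
Editor's note: the new guard only attempts the user.slice move when the host actually runs systemd. The usual detection, per sd_booted(3), is a check for /run/systemd/system — a sketch of what a helper like utils.RunsOnSystemd plausibly does (the real one may cache the result):

package main

import (
	"fmt"
	"os"
)

// runsOnSystemd reports whether systemd is the running init, using the
// check documented in sd_booted(3): /run/systemd/system exists and is a
// directory.
func runsOnSystemd() bool {
	fi, err := os.Lstat("/run/systemd/system")
	return err == nil && fi.IsDir()
}

func main() {
	fmt.Println("systemd:", runsOnSystemd())
}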
diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index 690f0c1fa..e5cc95cda 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -13,6 +13,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
"syscall"
"time"
@@ -302,11 +303,15 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
cmd.Stdout = logFile
cmd.Stderr = logFile
- var slirpReadyChan (chan struct{})
-
+ var slirpReadyWg, netnsReadyWg *sync.WaitGroup
if netOptions.enableIPv6 {
- slirpReadyChan = make(chan struct{})
- defer close(slirpReadyChan)
+ // use two wait groups to make sure we set the sysctl before
+ // starting slirp and reset it only after slirp is ready
+ slirpReadyWg = &sync.WaitGroup{}
+ netnsReadyWg = &sync.WaitGroup{}
+ slirpReadyWg.Add(1)
+ netnsReadyWg.Add(1)
+
go func() {
err := ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error {
// Duplicate Address Detection slows the ipv6 setup down for 1-2 seconds.
@@ -318,23 +323,37 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
// is ready in case users rely on this sysctl.
orgValue, err := ioutil.ReadFile(ipv6ConfDefaultAcceptDadSysctl)
if err != nil {
+ netnsReadyWg.Done()
+ // on ipv6-disabled systems the sysctl does not exist,
+ // so we should not error
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
return err
}
err = ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, []byte("0"), 0644)
+ netnsReadyWg.Done()
if err != nil {
return err
}
- // wait for slirp to finish setup
- <-slirpReadyChan
+
+ // wait until slirp4netns is ready before resetting this value
+ slirpReadyWg.Wait()
return ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, orgValue, 0644)
})
if err != nil {
logrus.Warnf("failed to set net.ipv6.conf.default.accept_dad sysctl: %v", err)
}
}()
+
+ // wait until we set the sysctl
+ netnsReadyWg.Wait()
}
if err := cmd.Start(); err != nil {
+ if netOptions.enableIPv6 {
+ slirpReadyWg.Done()
+ }
return errors.Wrapf(err, "failed to start slirp4netns process")
}
defer func() {
@@ -344,11 +363,12 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
}
}()
- if err := waitForSync(syncR, cmd, logFile, 1*time.Second); err != nil {
- return err
+ err = waitForSync(syncR, cmd, logFile, 1*time.Second)
+ if netOptions.enableIPv6 {
+ slirpReadyWg.Done()
}
- if slirpReadyChan != nil {
- slirpReadyChan <- struct{}{}
+ if err != nil {
+ return err
}
// Set a default slirp subnet. Parsing a string with the net helper is easier than building the struct myself
@@ -594,60 +614,73 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
// for each port we want to add we need to open a connection to the slirp4netns control socket
// and send the add_hostfwd command.
- for _, i := range ctr.convertPortMappings() {
- conn, err := net.Dial("unix", apiSocket)
- if err != nil {
- return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
- }
- defer func() {
- if err := conn.Close(); err != nil {
- logrus.Errorf("Unable to close connection: %q", err)
+ for _, port := range ctr.convertPortMappings() {
+ protocols := strings.Split(port.Protocol, ",")
+ for _, protocol := range protocols {
+ hostIP := port.HostIP
+ if hostIP == "" {
+ hostIP = "0.0.0.0"
+ }
+ for i := uint16(0); i < port.Range; i++ {
+ if err := openSlirp4netnsPort(apiSocket, protocol, hostIP, port.HostPort+i, port.ContainerPort+i); err != nil {
+ return err
+ }
}
- }()
- hostIP := i.HostIP
- if hostIP == "" {
- hostIP = "0.0.0.0"
- }
- apiCmd := slirp4netnsCmd{
- Execute: "add_hostfwd",
- Args: slirp4netnsCmdArg{
- Proto: i.Protocol,
- HostAddr: hostIP,
- HostPort: i.HostPort,
- GuestPort: i.ContainerPort,
- },
- }
- // create the JSON payload and send it. Mark the end of request shutting down writes
- // to the socket, as requested by slirp4netns.
- data, err := json.Marshal(&apiCmd)
- if err != nil {
- return errors.Wrapf(err, "cannot marshal JSON for slirp4netns")
- }
- if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
- return errors.Wrapf(err, "cannot write to control socket %s", apiSocket)
- }
- if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
- return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
- }
- buf := make([]byte, 2048)
- readLength, err := conn.Read(buf)
- if err != nil {
- return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
- }
- // if there is no 'error' key in the received JSON data, then the operation was
- // successful.
- var y map[string]interface{}
- if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
- return errors.Wrapf(err, "error parsing error status from slirp4netns")
- }
- if e, found := y["error"]; found {
- return errors.Errorf("error from slirp4netns while setting up port redirection: %v", e)
}
}
logrus.Debug("slirp4netns port-forwarding setup via add_hostfwd is ready")
return nil
}
+// openSlirp4netnsPort sends the slirp4netns API query to the given socket
+func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error {
+ conn, err := net.Dial("unix", apiSocket)
+ if err != nil {
+ return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("Unable to close slirp4netns connection: %q", err)
+ }
+ }()
+ apiCmd := slirp4netnsCmd{
+ Execute: "add_hostfwd",
+ Args: slirp4netnsCmdArg{
+ Proto: proto,
+ HostAddr: hostip,
+ HostPort: hostport,
+ GuestPort: guestport,
+ },
+ }
+ // create the JSON payload and send it. Mark the end of request shutting down writes
+ // to the socket, as requested by slirp4netns.
+ data, err := json.Marshal(&apiCmd)
+ if err != nil {
+ return errors.Wrapf(err, "cannot marshal JSON for slirp4netns")
+ }
+ if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
+ return errors.Wrapf(err, "cannot write to control socket %s", apiSocket)
+ }
+ if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
+ return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
+ }
+ buf := make([]byte, 2048)
+ readLength, err := conn.Read(buf)
+ if err != nil {
+ return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
+ }
+ // if there is no 'error' key in the received JSON data, then the operation was
+ // successful.
+ var y map[string]interface{}
+ if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
+ return errors.Wrapf(err, "error parsing error status from slirp4netns")
+ }
+ if e, found := y["error"]; found {
+ return errors.Errorf("from slirp4netns while setting up port redirection: %v", e)
+ }
+ return nil
+}
+
func getRootlessPortChildIP(c *Container, netStatus map[string]types.StatusBlock) string {
if c.config.NetMode.IsSlirp4netns() {
slirp4netnsIP, err := GetSlirp4netnsIP(c.slirp4netnsSubnet)
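
Editor's note: the switch from a channel to two WaitGroups fixes the ordering: the netns goroutine must set the sysctl before slirp4netns starts, and must restore it only after slirp4netns is ready. Note also that slirpReadyWg.Done() is now called exactly once on every path (start failure, sync failure, success) before any error return, keeping the Add/Done counts balanced. A toy demonstration of the same shape, with printed steps standing in for the real actions:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var setupDone, workerReady, restored sync.WaitGroup
	setupDone.Add(1)
	workerReady.Add(1)
	restored.Add(1)

	go func() {
		defer restored.Done()
		fmt.Println("1: set sysctl") // accept_dad = 0
		setupDone.Done()             // let the caller start the worker
		workerReady.Wait()           // block until the worker has synced
		fmt.Println("4: restore sysctl")
	}()

	setupDone.Wait()                // don't start the worker too early
	fmt.Println("2: start worker")  // cmd.Start() in the real code
	fmt.Println("3: worker synced") // waitForSync returned
	workerReady.Done()              // exactly once, on every path
	restored.Wait()
}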
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index a328f7621..7c4fffa5f 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1199,7 +1199,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe)
if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
- ports, err := bindPorts(ctr.config.PortMappings)
+ ports, err := bindPorts(ctr.convertPortMappings())
if err != nil {
return 0, err
}
@@ -1585,11 +1585,13 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
var si *syncInfo
rdr := bufio.NewReader(pipe)
b, err := rdr.ReadBytes('\n')
- if err != nil {
+ // ignore EOF here, an error is returned even when data was read;
+ // if it is not valid JSON, unmarshal will fail below
+ if err != nil && !errors.Is(err, io.EOF) {
ch <- syncStruct{err: err}
}
if err := json.Unmarshal(b, &si); err != nil {
- ch <- syncStruct{err: err}
+ ch <- syncStruct{err: fmt.Errorf("conmon bytes %q: %w", string(b), err)}
return
}
ch <- syncStruct{si: si}
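
Editor's note: bufio.Reader.ReadBytes can return both data and io.EOF when the writer side closes without the trailing delimiter, which is why the hunk above tolerates EOF and lets json.Unmarshal be the judge. A small self-contained demonstration:

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

func main() {
	// A "pipe" whose writer exited without sending the trailing '\n'.
	rdr := bufio.NewReader(bytes.NewReader([]byte(`{"pid": 42}`)))

	b, err := rdr.ReadBytes('\n')
	// err is io.EOF here, but b still holds the full payload —
	// treating EOF as fatal would discard valid data.
	if err != nil && !errors.Is(err, io.EOF) {
		panic(err)
	}
	var v struct{ Pid int }
	if err := json.Unmarshal(b, &v); err != nil {
		panic(fmt.Errorf("bytes %q: %w", b, err))
	}
	fmt.Println(v.Pid) // 42
}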
diff --git a/libpod/pod.go b/libpod/pod.go
index 6273ff247..ed2d97b37 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -422,10 +422,6 @@ type PodContainerStats struct {
// GetPodStats returns the stats for each of its containers
func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerStats) (map[string]*define.ContainerStats, error) {
- var (
- ok bool
- prevStat *define.ContainerStats
- )
p.lock.Lock()
defer p.lock.Unlock()
@@ -438,10 +434,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta
}
newContainerStats := make(map[string]*define.ContainerStats)
for _, c := range containers {
- if prevStat, ok = previousContainerStats[c.ID()]; !ok {
- prevStat = &define.ContainerStats{}
- }
- newStats, err := c.GetContainerStats(prevStat)
+ newStats, err := c.GetContainerStats(previousContainerStats[c.ID()])
// If the container wasn't running, don't include it
// but also suppress the error
if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
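
Editor's note: the simplification works because indexing a Go map with a missing key yields the zero value — nil for a pointer type — and GetContainerStats (see stats.go below) now treats a nil previousStats as "no previous sample":

package main

import "fmt"

type stats struct{ cpu float64 }

func main() {
	prev := map[string]*stats{"abc": {cpu: 1.5}}
	// A missing key yields the zero value, so no ok-check is needed
	// when the callee handles nil itself.
	fmt.Println(prev["abc"], prev["missing"]) // &{1.5} <nil>
}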
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 155ad5c2d..2bbccfdf6 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package libpod
@@ -5,6 +6,7 @@ package libpod
import (
"context"
"fmt"
+ "os"
"path"
"path/filepath"
"strings"
@@ -59,50 +61,52 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
pod.valid = true
// Check Cgroup parent sanity, and set it if it was not set
- switch r.config.Engine.CgroupManager {
- case config.CgroupfsCgroupsManager:
- canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent)
- if canUseCgroup {
+ if r.config.Cgroups() != "disabled" {
+ switch r.config.Engine.CgroupManager {
+ case config.CgroupfsCgroupsManager:
+ canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent)
+ if canUseCgroup {
+ if pod.config.CgroupParent == "" {
+ pod.config.CgroupParent = CgroupfsDefaultCgroupParent
+ } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ }
+ // If we are set to use pod cgroups, set the cgroup parent that
+ // all containers in the pod will share
+ // No need to create it with cgroupfs - the first container to
+ // launch should do it for us
+ if pod.config.UsePodCgroup {
+ pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
+ if p.InfraContainerSpec != nil {
+ p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
+ }
+ }
+ }
+ case config.SystemdCgroupsManager:
if pod.config.CgroupParent == "" {
- pod.config.CgroupParent = CgroupfsDefaultCgroupParent
- } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ if rootless.IsRootless() {
+ pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
+ } else {
+ pod.config.CgroupParent = SystemdDefaultCgroupParent
+ }
+ } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
- // No need to create it with cgroupfs - the first container to
- // launch should do it for us
if pod.config.UsePodCgroup {
- pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
+ cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()))
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
+ }
+ pod.state.CgroupPath = cgroupPath
if p.InfraContainerSpec != nil {
p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
}
}
+ default:
+ return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
}
- case config.SystemdCgroupsManager:
- if pod.config.CgroupParent == "" {
- if rootless.IsRootless() {
- pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
- } else {
- pod.config.CgroupParent = SystemdDefaultCgroupParent
- }
- } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
- }
- // If we are set to use pod cgroups, set the cgroup parent that
- // all containers in the pod will share
- if pod.config.UsePodCgroup {
- cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()))
- if err != nil {
- return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
- }
- pod.state.CgroupPath = cgroupPath
- if p.InfraContainerSpec != nil {
- p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
- }
- }
- default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
}
if pod.config.UsePodCgroup {
@@ -236,7 +240,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Don't try if we failed to retrieve the cgroup
if err == nil {
- if err := conmonCgroup.Update(resLimits); err != nil {
+ if err := conmonCgroup.Update(resLimits); err != nil && !os.IsNotExist(err) {
logrus.Warnf("Error updating pod %s conmon cgroup PID limit: %v", p.ID(), err)
}
}
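
Editor's note: the cgroup-parent rules themselves are unchanged; they are merely skipped when cgroups are disabled. Distilled, the validation amounts to: cgroupfs must not be handed a systemd slice, and systemd must be handed one. A sketch with illustrative names, not libpod's real helpers:

package main

import (
	"fmt"
	"path"
	"strings"
)

func validateCgroupParent(manager, parent string) error {
	isSlice := strings.HasSuffix(path.Base(parent), ".slice")
	switch manager {
	case "cgroupfs":
		if parent != "" && isSlice {
			return fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs")
		}
	case "systemd":
		if parent != "" && (len(parent) < 6 || !isSlice) {
			return fmt.Errorf("did not receive systemd slice as cgroup parent")
		}
	default:
		return fmt.Errorf("unsupported cgroup manager %q", manager)
	}
	return nil
}

func main() {
	fmt.Println(validateCgroupParent("systemd", "user.slice"))  // <nil>
	fmt.Println(validateCgroupParent("cgroupfs", "user.slice")) // error
}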
diff --git a/libpod/stats.go b/libpod/stats.go
index dbb10a27e..4fff385a3 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -13,7 +13,9 @@ import (
"github.com/pkg/errors"
)
-// GetContainerStats gets the running stats for a given container
+// GetContainerStats gets the running stats for a given container.
+// The previousStats is used to correctly calculate cpu percentages. You
+// should pass nil if there is no previous stat for this container.
func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*define.ContainerStats, error) {
stats := new(define.ContainerStats)
stats.ContainerID = c.ID()
@@ -35,6 +37,14 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
return stats, define.ErrCtrStateInvalid
}
+ if previousStats == nil {
+ previousStats = &define.ContainerStats{
+ // if we have no previous stats, use the container start time as the
+ // previous time, otherwise we cannot correctly calculate the CPU percentage
+ SystemNano: uint64(c.state.StartedTime.UnixNano()),
+ }
+ }
+
cgroupPath, err := c.cGroupPath()
if err != nil {
return nil, err
@@ -66,8 +76,8 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
stats.Duration = cgroupStats.CPU.Usage.Total
stats.UpTime = time.Duration(stats.Duration)
stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano)
- stats.AvgCPU = calculateAvgCPU(stats.CPU, previousStats.AvgCPU, previousStats.DataPoints)
- stats.DataPoints = previousStats.DataPoints + 1
+ // calculate the average CPU usage for the time the container has been running
+ stats.AvgCPU = calculateCPUPercent(cgroupStats, 0, now, uint64(c.state.StartedTime.UnixNano()))
stats.MemUsage = cgroupStats.Memory.Usage.Usage
stats.MemLimit = c.getMemLimit()
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
@@ -145,9 +155,3 @@ func calculateBlockIO(stats *cgroups.Metrics) (read uint64, write uint64) {
}
return
}
-
-// calculateAvgCPU calculates the avg CPU percentage given the previous average and the number of data points.
-func calculateAvgCPU(statsCPU float64, prevAvg float64, prevData int64) float64 {
- avgPer := ((prevAvg * float64(prevData)) + statsCPU) / (float64(prevData) + 1)
- return avgPer
-}
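
Editor's note: dropping calculateAvgCPU and DataPoints removes a sampling-rate dependency — the old incremental average weighted each call equally regardless of how much time passed between samples. The replacement derives the lifetime average directly from total CPU time over wall-clock uptime. Roughly, assuming nanosecond units as above (the real calculateCPUPercent may also scale by CPU count):

package main

import (
	"fmt"
	"time"
)

// avgCPUPercent: total cgroup CPU time divided by wall-clock time since
// the container started, as a percentage.
func avgCPUPercent(cpuTotalNano uint64, started, now time.Time) float64 {
	elapsed := now.Sub(started).Nanoseconds()
	if elapsed <= 0 {
		return 0
	}
	return float64(cpuTotalNano) / float64(elapsed) * 100
}

func main() {
	start := time.Now().Add(-10 * time.Second)
	// 2.5s of CPU time over 10s of wall clock ≈ 25%.
	fmt.Printf("%.1f%%\n", avgCPUPercent(2_500_000_000, start, time.Now()))
}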
diff --git a/libpod/volume.go b/libpod/volume.go
index d60d978ed..2b263f9fb 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,13 +1,12 @@
package libpod
import (
- "os"
- "path/filepath"
"time"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/lock"
"github.com/containers/podman/v4/libpod/plugin"
+ "github.com/containers/podman/v4/pkg/util"
)
// Volume is a libpod named volume.
@@ -93,14 +92,7 @@ func (v *Volume) Name() string {
// Returns the size on disk of volume
func (v *Volume) Size() (uint64, error) {
- var size uint64
- err := filepath.Walk(v.config.MountPoint, func(path string, info os.FileInfo, err error) error {
- if err == nil && !info.IsDir() {
- size += (uint64)(info.Size())
- }
- return err
- })
- return size, err
+ return util.SizeOfPath(v.config.MountPoint)
}
// Driver retrieves the volume's driver.