From 2792e598c7ce1198ec8464a3119504123ae8397c Mon Sep 17 00:00:00 2001
From: cdoern
Date: Mon, 13 Jun 2022 15:35:16 -0400
Subject: podman cgroup enhancement

Currently, setting any sort of resource limit in a pod does nothing. With the
newly refactored creation process in c/common, podman can now set resources at
the pod level, meaning that resource-related flags can be exposed to
podman pod create.

cgroupfs and systemd are both supported, with varying degrees of completeness.
cgroupfs is the much simpler path and is virtually complete for all resource
types; the flags now just need to be added. systemd, on the other hand, has to
be handled via the dbus API, meaning that the limits need to be passed to
systemd as recognized properties. The properties added so far are the ones
that podman pod create supports, as well as `cpuset-mems`, since that will be
the next flag I work on.

Signed-off-by: Charlie Doern
---
 pkg/api/handlers/compat/containers_stats.go | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

(limited to 'pkg/api/handlers')

diff --git a/pkg/api/handlers/compat/containers_stats.go b/pkg/api/handlers/compat/containers_stats.go
index d6bc26416..12c5283fc 100644
--- a/pkg/api/handlers/compat/containers_stats.go
+++ b/pkg/api/handlers/compat/containers_stats.go
@@ -12,6 +12,7 @@ import (
 	api "github.com/containers/podman/v4/pkg/api/types"
 	docker "github.com/docker/docker/api/types"
 	"github.com/gorilla/schema"
+	runccgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -133,7 +134,7 @@ streamLabel: // A label to flatten the scope
 	}
 
 	cfg := ctnr.Config()
-	memoryLimit := cgroupStat.Memory.Usage.Limit
+	memoryLimit := cgroupStat.MemoryStats.Usage.Limit
 	if cfg.Spec.Linux != nil && cfg.Spec.Linux.Resources != nil && cfg.Spec.Linux.Resources.Memory != nil && *cfg.Spec.Linux.Resources.Memory.Limit > 0 {
 		memoryLimit = uint64(*cfg.Spec.Linux.Resources.Memory.Limit)
 	}
@@ -144,11 +145,11 @@ streamLabel: // A label to flatten the scope
 		Read:    time.Now(),
 		PreRead: preRead,
 		PidsStats: docker.PidsStats{
-			Current: cgroupStat.Pids.Current,
+			Current: cgroupStat.PidsStats.Current,
 			Limit:   0,
 		},
 		BlkioStats: docker.BlkioStats{
-			IoServiceBytesRecursive: toBlkioStatEntry(cgroupStat.Blkio.IoServiceBytesRecursive),
+			IoServiceBytesRecursive: toBlkioStatEntry(cgroupStat.BlkioStats.IoServiceBytesRecursive),
 			IoServicedRecursive:     nil,
 			IoQueuedRecursive:       nil,
 			IoServiceTimeRecursive:  nil,
@@ -159,14 +160,14 @@ streamLabel: // A label to flatten the scope
 		},
 		CPUStats: CPUStats{
 			CPUUsage: docker.CPUUsage{
-				TotalUsage:        cgroupStat.CPU.Usage.Total,
-				PercpuUsage:       cgroupStat.CPU.Usage.PerCPU,
-				UsageInKernelmode: cgroupStat.CPU.Usage.Kernel,
-				UsageInUsermode:   cgroupStat.CPU.Usage.Total - cgroupStat.CPU.Usage.Kernel,
+				TotalUsage:        cgroupStat.CpuStats.CpuUsage.TotalUsage,
+				PercpuUsage:       cgroupStat.CpuStats.CpuUsage.PercpuUsage,
+				UsageInKernelmode: cgroupStat.CpuStats.CpuUsage.UsageInKernelmode,
+				UsageInUsermode:   cgroupStat.CpuStats.CpuUsage.TotalUsage - cgroupStat.CpuStats.CpuUsage.UsageInKernelmode,
 			},
 			CPU:         stats.CPU,
 			SystemUsage: systemUsage,
-			OnlineCPUs:  uint32(len(cgroupStat.CPU.Usage.PerCPU)),
+			OnlineCPUs:  uint32(len(cgroupStat.CpuStats.CpuUsage.PercpuUsage)),
 			ThrottlingData: docker.ThrottlingData{
 				Periods:          0,
 				ThrottledPeriods: 0,
@@ -175,8 +176,8 @@ streamLabel: // A label to flatten the scope
 		},
 		PreCPUStats: preCPUStats,
 		MemoryStats: docker.MemoryStats{
-			Usage:    cgroupStat.Memory.Usage.Usage,
-			MaxUsage: cgroupStat.Memory.Usage.Limit,
+			Usage:    cgroupStat.MemoryStats.Usage.Usage,
+			MaxUsage: cgroupStat.MemoryStats.Usage.Limit,
 			Stats:    nil,
 			Failcnt:  0,
 			Limit:    memoryLimit,
@@ -216,7 +217,7 @@ streamLabel: // A label to flatten the scope
 	}
 }
 
-func toBlkioStatEntry(entries []cgroups.BlkIOEntry) []docker.BlkioStatEntry {
+func toBlkioStatEntry(entries []runccgroups.BlkioStatEntry) []docker.BlkioStatEntry {
 	results := make([]docker.BlkioStatEntry, len(entries))
 	for i, e := range entries {
 		bits, err := json.Marshal(e)
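The last hunk's context ends at the json.Marshal call, but it already shows
the conversion strategy in toBlkioStatEntry: because the runc and Docker
blkio entry types carry matching JSON field names, each entry is re-encoded
as JSON and (presumably) decoded into the Docker type rather than copied
field by field. A minimal self-contained sketch of that round-trip trick,
using stand-in types instead of the real runccgroups/docker structs:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in source and destination types with matching JSON tags; the
// real code converts runccgroups.BlkioStatEntry into docker.BlkioStatEntry.
type srcEntry struct {
	Major uint64 `json:"major"`
	Minor uint64 `json:"minor"`
	Op    string `json:"op"`
	Value uint64 `json:"value"`
}

type dstEntry struct {
	Major uint64 `json:"major"`
	Minor uint64 `json:"minor"`
	Op    string `json:"op"`
	Value uint64 `json:"value"`
}

// convert marshals each source entry and unmarshals it into the
// destination type, so fields with matching tags carry over without a
// hand-written field-by-field copy.
func convert(entries []srcEntry) []dstEntry {
	results := make([]dstEntry, len(entries))
	for i, e := range entries {
		bits, err := json.Marshal(e)
		if err != nil {
			continue // the real function logs marshal errors instead
		}
		if err := json.Unmarshal(bits, &results[i]); err != nil {
			continue // likewise for unmarshal errors
		}
	}
	return results
}

func main() {
	in := []srcEntry{{Major: 8, Minor: 0, Op: "Read", Value: 4096}}
	fmt.Printf("%+v\n", convert(in)) // [{Major:8 Minor:0 Op:Read Value:4096}]
}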
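More broadly, the field renames throughout the diff (Memory -> MemoryStats,
Pids -> PidsStats, Blkio -> BlkioStats, CPU.Usage -> CpuStats.CpuUsage)
track the stats types in runc's libcontainer cgroups package, which is what
cgroupStat now is. The following is a paraphrased subset of those types,
trimmed to just the fields this patch reads; see
github.com/opencontainers/runc/libcontainer/cgroups for the authoritative
definitions, which carry many more fields:

package sketch

// Paraphrased subset of runc's cgroup stats types (not the upstream
// declarations), covering only the fields the patch touches.
type Stats struct {
	CpuStats    CpuStats
	MemoryStats MemoryStats
	PidsStats   PidsStats
	BlkioStats  BlkioStats
}

type CpuStats struct{ CpuUsage CpuUsage }

type CpuUsage struct {
	TotalUsage        uint64   // cumulative CPU time consumed, in nanoseconds
	PercpuUsage       []uint64 // TotalUsage broken out per CPU
	UsageInKernelmode uint64   // kernel-mode share of TotalUsage
}

type MemoryStats struct{ Usage MemoryData }

type MemoryData struct {
	Usage    uint64 // current usage, bytes
	MaxUsage uint64 // peak usage, bytes
	Failcnt  uint64 // limit-hit counter
	Limit    uint64 // configured limit, bytes
}

type PidsStats struct {
	Current uint64 // number of pids in the cgroup
	Limit   uint64 // 0 means unlimited
}

type BlkioStats struct {
	IoServiceBytesRecursive []BlkioStatEntry
}

type BlkioStatEntry struct {
	Major, Minor uint64 // device numbers
	Op           string // e.g. "Read" or "Write"
	Value        uint64
}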
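On the systemd side, the message above notes that limits have to be passed
to systemd as recognized properties over the dbus API. As a rough
illustration only (the real implementation lives in c/common, and the slice
name and property values below are invented for the example), setting
resource properties on a unit via github.com/godbus/dbus/v5 looks roughly
like this:

package main

import (
	"log"

	"github.com/godbus/dbus/v5"
)

// property matches the a(sv) element type that systemd's
// SetUnitProperties method expects.
type property struct {
	Name  string
	Value dbus.Variant
}

func main() {
	conn, err := dbus.SystemBus()
	if err != nil {
		log.Fatal(err)
	}

	// Resource limits expressed as systemd unit properties; the names
	// and values here are illustrative, not what podman sends.
	props := []property{
		{Name: "MemoryMax", Value: dbus.MakeVariant(uint64(512 * 1024 * 1024))},
		{Name: "CPUWeight", Value: dbus.MakeVariant(uint64(200))},
	}

	sysd := conn.Object("org.freedesktop.systemd1",
		dbus.ObjectPath("/org/freedesktop/systemd1"))

	// SetUnitProperties(name, runtime, properties): "example.slice" is a
	// hypothetical pod slice; true applies the change at runtime only.
	call := sysd.Call("org.freedesktop.systemd1.Manager.SetUnitProperties",
		0, "example.slice", true, props)
	if call.Err != nil {
		log.Fatal(call.Err)
	}
}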