author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>	2020-03-10 17:43:23 +0100
committer GitHub <noreply@github.com>	2020-03-10 17:43:23 +0100
commit    173f430852e52a1acf69b4d9d18da20893031a79
tree      e27af2df4e9c2c24f6a245e245ccc96d5fe1706b	/pkg/api/handlers/compat/containers_stats.go
parent    684813fb3effbd7a483e44233ed395eb49c7fded
parent    31112e4b087612f7d63e83d770263b8b9fa4f206
Merge pull request #5432 from jwhonce/wip/compat_refactor
Refactor handler packages
Diffstat (limited to 'pkg/api/handlers/compat/containers_stats.go')
-rw-r--r--	pkg/api/handlers/compat/containers_stats.go	211
1 file changed, 211 insertions, 0 deletions
diff --git a/pkg/api/handlers/compat/containers_stats.go b/pkg/api/handlers/compat/containers_stats.go
new file mode 100644
index 000000000..53ad0a632
--- /dev/null
+++ b/pkg/api/handlers/compat/containers_stats.go
@@ -0,0 +1,211 @@
+package compat
+
+import (
+	"encoding/json"
+	"net/http"
+	"time"
+
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/pkg/api/handlers/utils"
+	"github.com/containers/libpod/pkg/cgroups"
+	docker "github.com/docker/docker/api/types"
+	"github.com/gorilla/schema"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const DefaultStatsPeriod = 5 * time.Second
+
+func StatsContainer(w http.ResponseWriter, r *http.Request) {
+	runtime := r.Context().Value("runtime").(*libpod.Runtime)
+	decoder := r.Context().Value("decoder").(*schema.Decoder)
+
+	query := struct {
+		Stream bool `schema:"stream"`
+	}{
+		Stream: true,
+	}
+	if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+		utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+		return
+	}
+
+	name := utils.GetName(r)
+	ctnr, err := runtime.LookupContainer(name)
+	if err != nil {
+		utils.ContainerNotFound(w, name, err)
+		return
+	}
+
+	// If the container isn't running, then let's not bother and return
+	// immediately.
+	state, err := ctnr.State()
+	if err != nil {
+		utils.InternalServerError(w, err)
+		return
+	}
+	if state != define.ContainerStateRunning && !query.Stream {
+		utils.InternalServerError(w, define.ErrCtrStateInvalid)
+		return
+	}
+
+	stats, err := ctnr.GetContainerStats(&libpod.ContainerStats{})
+	if err != nil {
+		utils.InternalServerError(w, errors.Wrapf(err, "Failed to obtain Container %s stats", name))
+		return
+	}
+
+	var preRead time.Time
+	var preCPUStats CPUStats
+	if query.Stream {
+		preRead = time.Now()
+		systemUsage, _ := cgroups.GetSystemCPUUsage()
+		preCPUStats = CPUStats{
+			CPUUsage: docker.CPUUsage{
+				TotalUsage:        stats.CPUNano,
+				PercpuUsage:       stats.PerCPU,
+				UsageInKernelmode: stats.CPUSystemNano,
+				UsageInUsermode:   stats.CPUNano - stats.CPUSystemNano,
+			},
+			CPU:            stats.CPU,
+			SystemUsage:    systemUsage,
+			OnlineCPUs:     0,
+			ThrottlingData: docker.ThrottlingData{},
+		}
+	}
+
+	for ok := true; ok; ok = query.Stream {
+		// Container stats
+		stats, err := ctnr.GetContainerStats(stats)
+		if err != nil {
+			utils.InternalServerError(w, err)
+			return
+		}
+		inspect, err := ctnr.Inspect(false)
+		if err != nil {
+			utils.InternalServerError(w, err)
+			return
+		}
+		// Cgroup stats
+		cgroupPath, err := ctnr.CGroupPath()
+		if err != nil {
+			utils.InternalServerError(w, err)
+			return
+		}
+		cgroup, err := cgroups.Load(cgroupPath)
+		if err != nil {
+			utils.InternalServerError(w, err)
+			return
+		}
+		cgroupStat, err := cgroup.Stat()
+		if err != nil {
+			utils.InternalServerError(w, err)
+			return
+		}
+
+		// FIXME: network inspection does not yet work entirely
+		net := make(map[string]docker.NetworkStats)
+		networkName := inspect.NetworkSettings.EndpointID
+		if networkName == "" {
+			networkName = "network"
+		}
+		net[networkName] = docker.NetworkStats{
+			RxBytes:    stats.NetInput,
+			RxPackets:  0,
+			RxErrors:   0,
+			RxDropped:  0,
+			TxBytes:    stats.NetOutput,
+			TxPackets:  0,
+			TxErrors:   0,
+			TxDropped:  0,
+			EndpointID: inspect.NetworkSettings.EndpointID,
+			InstanceID: "",
+		}
+
+		systemUsage, _ := cgroups.GetSystemCPUUsage()
+		s := StatsJSON{
+			Stats: Stats{
+				Read:    time.Now(),
+				PreRead: preRead,
+				PidsStats: docker.PidsStats{
+					Current: cgroupStat.Pids.Current,
+					Limit:   0,
+				},
+				BlkioStats: docker.BlkioStats{
+					IoServiceBytesRecursive: toBlkioStatEntry(cgroupStat.Blkio.IoServiceBytesRecursive),
+					IoServicedRecursive:     nil,
+					IoQueuedRecursive:       nil,
+					IoServiceTimeRecursive:  nil,
+					IoWaitTimeRecursive:     nil,
+					IoMergedRecursive:       nil,
+					IoTimeRecursive:         nil,
+					SectorsRecursive:        nil,
+				},
+				CPUStats: CPUStats{
+					CPUUsage: docker.CPUUsage{
+						TotalUsage:        cgroupStat.CPU.Usage.Total,
+						PercpuUsage:       cgroupStat.CPU.Usage.PerCPU,
+						UsageInKernelmode: cgroupStat.CPU.Usage.Kernel,
+						UsageInUsermode:   cgroupStat.CPU.Usage.Total - cgroupStat.CPU.Usage.Kernel,
+					},
+					CPU:         stats.CPU,
+					SystemUsage: systemUsage,
+					OnlineCPUs:  uint32(len(cgroupStat.CPU.Usage.PerCPU)),
+					ThrottlingData: docker.ThrottlingData{
+						Periods:          0,
+						ThrottledPeriods: 0,
+						ThrottledTime:    0,
+					},
+				},
+				PreCPUStats: preCPUStats,
+				MemoryStats: docker.MemoryStats{
+					Usage:             cgroupStat.Memory.Usage.Usage,
+					MaxUsage:          cgroupStat.Memory.Usage.Limit,
+					Stats:             nil,
+					Failcnt:           0,
+					Limit:             cgroupStat.Memory.Usage.Limit,
+					Commit:            0,
+					CommitPeak:        0,
+					PrivateWorkingSet: 0,
+				},
+			},
+			Name:     stats.Name,
+			ID:       stats.ContainerID,
+			Networks: net,
+		}
+
+		utils.WriteJSON(w, http.StatusOK, s)
+		if flusher, ok := w.(http.Flusher); ok {
+			flusher.Flush()
+		}
+
+		preRead = s.Read
+		bits, err := json.Marshal(s.CPUStats)
+		if err != nil {
+			logrus.Errorf("Unable to marshal cpu stats: %q", err)
+		}
+		if err := json.Unmarshal(bits, &preCPUStats); err != nil {
+			logrus.Errorf("Unable to unmarshal previous stats: %q", err)
+		}
+
+		// Only sleep when we're streaming.
+		if query.Stream {
+			time.Sleep(DefaultStatsPeriod)
+		}
+	}
+}
+
+func toBlkioStatEntry(entries []cgroups.BlkIOEntry) []docker.BlkioStatEntry {
+	results := make([]docker.BlkioStatEntry, len(entries))
+	for i, e := range entries {
+		bits, err := json.Marshal(e)
+		if err != nil {
+			logrus.Errorf("unable to marshal blkio stats: %q", err)
+		}
+		if err := json.Unmarshal(bits, &results[i]); err != nil {
+			logrus.Errorf("unable to unmarshal blkio stats: %q", err)
+		}
+	}
+	return results
+}
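A note on how the handler is consumed, with a hedged example that is not part of the commit: StatsContainer writes one Docker-compatible stats object per DefaultStatsPeriod tick while stream=true (the query default), flushing after each write; with stream=false the loop body runs exactly once. Docker-style clients derive CPU percentages from the delta between cpu_stats and precpu_stats across consecutive frames, which is why the handler round-trips s.CPUStats through JSON into preCPUStats at the end of each iteration. The sketch below is a minimal Go client for exercising the endpoint; the socket path /run/podman/podman.sock, the placeholder container name mycontainer, the dummy URL host d, and the unversioned request path are assumptions about a typical Podman service setup, not something this commit establishes.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// statsFrame decodes just a few fields of the Docker-style payload
// produced by StatsContainer; the real object carries many more.
type statsFrame struct {
	Name     string `json:"name"`
	ID       string `json:"id"`
	CPUStats struct {
		CPUUsage struct {
			TotalUsage uint64 `json:"total_usage"`
		} `json:"cpu_usage"`
	} `json:"cpu_stats"`
}

func main() {
	// Assumption: the Podman API service listens on this socket.
	const sock = "/run/podman/podman.sock"
	client := &http.Client{
		Transport: &http.Transport{
			// Route every request over the unix socket; the "d" host in
			// the URL below is a dummy value required by net/http.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
		},
	}

	// "mycontainer" is a placeholder name; stream=true (the default)
	// keeps the connection open, one JSON object per stats period.
	resp, err := client.Get("http://d/containers/mycontainer/stats?stream=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for i := 0; i < 3; i++ { // read a few frames, then stop
		var f statsFrame
		if err := dec.Decode(&f); err != nil {
			panic(err)
		}
		fmt.Printf("%s (%s): total CPU %d ns\n", f.Name, f.ID, f.CPUStats.CPUUsage.TotalUsage)
	}
}

Decoding with json.Decoder directly on the response body, rather than reading the body whole, is what makes the streaming case workable: each Decode call consumes exactly one JSON object as the server flushes it.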