Diffstat (limited to 'libpod')
-rw-r--r--   libpod/container_exec.go               56
-rw-r--r--   libpod/container_path_resolution.go     1
-rw-r--r--   libpod/define/containerstate.go         1
-rw-r--r--   libpod/doc.go                           11
-rw-r--r--   libpod/networking_machine.go            15
-rw-r--r--   libpod/oci_conmon_exec_linux.go          7
-rw-r--r--   libpod/oci_conmon_linux.go               6
-rw-r--r--   libpod/pod.go                            9
-rw-r--r--   libpod/stats.go                         22
9 files changed, 94 insertions, 34 deletions
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index d1c190905..140267f28 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -341,22 +341,60 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
     }
     lastErr = tmpErr

-    exitCode, err := c.readExecExitCode(session.ID())
-    if err != nil {
+    exitCode, exitCodeErr := c.readExecExitCode(session.ID())
+
+    // Lock again.
+    // Important: we must lock and sync *before* the above error is handled.
+    // We need info from the database to handle the error.
+    if !c.batched {
+        c.lock.Lock()
+    }
+    // We can't reuse the old exec session (things may have changed from
+    // other use, the container was unlocked).
+    // So re-sync and get a fresh copy.
+    // If we can't do this, no point in continuing, any attempt to save
+    // would write garbage to the DB.
+    if err := c.syncContainer(); err != nil {
+        if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+            // We can't save status, but since the container has
+            // been entirely removed, we don't have to; exit cleanly
+            return lastErr
+        }
         if lastErr != nil {
             logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
         }
-        lastErr = err
-    }
+        return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID)
+    }
+
+    // Now handle the error from readExecExitCode above.
+    if exitCodeErr != nil {
+        newSess, ok := c.state.ExecSessions[sessionID]
+        if !ok {
+            // The exec session was removed entirely, probably by
+            // the cleanup process. When it did so, it should have
+            // written an event with the exit code.
+            // Given that, there's nothing more we can do.
+            logrus.Infof("Container %s exec session %s already removed", c.ID(), session.ID())
+            return lastErr
+        }

-    logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+        if newSess.State == define.ExecStateStopped {
+            // Exec session already cleaned up.
+            // Exit code should be recorded, so it's OK if we were
+            // not able to read it.
+            logrus.Infof("Container %s exec session %s already cleaned up", c.ID(), session.ID())
+            return lastErr
+        }

-    // Lock again
-    if !c.batched {
-        c.lock.Lock()
+        if lastErr != nil {
+            logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+        }
+        lastErr = exitCodeErr
     }

-    if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
+    logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+
+    if err := justWriteExecExitCode(c, session.ID(), exitCode); err != nil {
         if lastErr != nil {
             logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
         }
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index 7db23b783..80a3749f5 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -1,4 +1,3 @@
-// +linux
 package libpod

 import (
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 23ba1f451..9ad3aec08 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -138,7 +138,6 @@ type ContainerStats struct {
     CPU           float64
     CPUNano       uint64
     CPUSystemNano uint64
-    DataPoints    int64
     SystemNano    uint64
     MemUsage      uint64
     MemLimit      uint64
diff --git a/libpod/doc.go b/libpod/doc.go
new file mode 100644
index 000000000..948153181
--- /dev/null
+++ b/libpod/doc.go
@@ -0,0 +1,11 @@
+// The libpod library is not stable and we do not support use cases outside of
+// this repository. The API can change at any time even with patch releases.
+//
+// If you need a stable interface Podman provides a HTTP API which follows semver,
+// please see https://docs.podman.io/en/latest/markdown/podman-system-service.1.html
+// to start the api service and https://docs.podman.io/en/latest/_static/api.html
+// for the API reference.
+//
+// We also provide stable go bindings to talk to the api service from another go
+// program, see the pkg/bindings directory.
+package libpod
diff --git a/libpod/networking_machine.go b/libpod/networking_machine.go
index ca759b893..d2a6b7cfa 100644
--- a/libpod/networking_machine.go
+++ b/libpod/networking_machine.go
@@ -11,6 +11,7 @@ import (
     "net/http"
     "strconv"
     "strings"
+    "time"

     "github.com/containers/common/libnetwork/types"
     "github.com/sirupsen/logrus"
@@ -36,7 +37,18 @@ func requestMachinePorts(expose bool, ports []types.PortMapping) error {
         url = url + "unexpose"
     }
     ctx := context.Background()
-    client := &http.Client{}
+    client := &http.Client{
+        Transport: &http.Transport{
+            // make sure to not set a proxy here so explicitly ignore the proxy
+            // since we want to talk directly to gvproxy
+            // https://github.com/containers/podman/issues/13628
+            Proxy:                 nil,
+            MaxIdleConns:          50,
+            IdleConnTimeout:       30 * time.Second,
+            TLSHandshakeTimeout:   10 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+        },
+    }
     buf := new(bytes.Buffer)
     for num, port := range ports {
         protocols := strings.Split(port.Protocol, ",")
@@ -78,7 +90,6 @@ func requestMachinePorts(expose bool, ports []types.PortMapping) error {
 }

 func makeMachineRequest(ctx context.Context, client *http.Client, url string, buf io.Reader) error {
-    //var buf io.ReadWriter
     req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf)
     if err != nil {
         return err
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index aa970bbde..65123b37e 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -758,11 +758,14 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio
     } else {
         pspec.Capabilities.Bounding = ctrSpec.Process.Capabilities.Bounding
     }
+
+    // Always unset the inheritable capabilities similarly to what the Linux kernel does
+    // They are used only when using capabilities with uid != 0.
+    pspec.Capabilities.Inheritable = []string{}
+
     if execUser.Uid == 0 {
         pspec.Capabilities.Effective = pspec.Capabilities.Bounding
-        pspec.Capabilities.Inheritable = pspec.Capabilities.Bounding
         pspec.Capabilities.Permitted = pspec.Capabilities.Bounding
-        pspec.Capabilities.Ambient = pspec.Capabilities.Bounding
     } else {
         if user == c.config.User {
             pspec.Capabilities.Effective = ctrSpec.Process.Capabilities.Effective
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index ba4079bed..38bf85834 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1587,11 +1587,13 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
         var si *syncInfo
         rdr := bufio.NewReader(pipe)
         b, err := rdr.ReadBytes('\n')
-        if err != nil {
+        // ignore EOF here, error is returned even when data was read
+        // if it is no valid json unmarshal will fail below
+        if err != nil && !errors.Is(err, io.EOF) {
             ch <- syncStruct{err: err}
         }
         if err := json.Unmarshal(b, &si); err != nil {
-            ch <- syncStruct{err: err}
+            ch <- syncStruct{err: fmt.Errorf("conmon bytes %q: %w", string(b), err)}
             return
         }
         ch <- syncStruct{si: si}
diff --git a/libpod/pod.go b/libpod/pod.go
index 6273ff247..ed2d97b37 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -422,10 +422,6 @@ type PodContainerStats struct {

 // GetPodStats returns the stats for each of its containers
 func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerStats) (map[string]*define.ContainerStats, error) {
-    var (
-        ok       bool
-        prevStat *define.ContainerStats
-    )
     p.lock.Lock()
     defer p.lock.Unlock()

@@ -438,10 +434,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta
     }
     newContainerStats := make(map[string]*define.ContainerStats)
     for _, c := range containers {
-        if prevStat, ok = previousContainerStats[c.ID()]; !ok {
-            prevStat = &define.ContainerStats{}
-        }
-        newStats, err := c.GetContainerStats(prevStat)
+        newStats, err := c.GetContainerStats(previousContainerStats[c.ID()])
         // If the container wasn't running, don't include it
         // but also suppress the error
         if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
diff --git a/libpod/stats.go b/libpod/stats.go
index b5d39240d..25baa378d 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -14,7 +14,9 @@ import (
     "github.com/pkg/errors"
 )

-// GetContainerStats gets the running stats for a given container
+// GetContainerStats gets the running stats for a given container.
+// The previousStats is used to correctly calculate cpu percentages. You
+// should pass nil if there is no previous stat for this container.
 func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*define.ContainerStats, error) {
     stats := new(define.ContainerStats)
     stats.ContainerID = c.ID()
@@ -36,6 +38,14 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
         return stats, define.ErrCtrStateInvalid
     }

+    if previousStats == nil {
+        previousStats = &define.ContainerStats{
+            // if we have no prev stats use the container start time as prev time
+            // otherwise we cannot correctly calculate the CPU percentage
+            SystemNano: uint64(c.state.StartedTime.UnixNano()),
+        }
+    }
+
     cgroupPath, err := c.cGroupPath()
     if err != nil {
         return nil, err
@@ -67,8 +77,8 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
     stats.Duration = cgroupStats.CPU.Usage.Total
     stats.UpTime = time.Duration(stats.Duration)
     stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano)
-    stats.AvgCPU = calculateAvgCPU(stats.CPU, previousStats.AvgCPU, previousStats.DataPoints)
-    stats.DataPoints = previousStats.DataPoints + 1
+    // calc the average cpu usage for the time the container is running
+    stats.AvgCPU = calculateCPUPercent(cgroupStats, 0, now, uint64(c.state.StartedTime.UnixNano()))
     stats.MemUsage = cgroupStats.Memory.Usage.Usage
     stats.MemLimit = c.getMemLimit()
     stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
@@ -146,9 +156,3 @@ func calculateBlockIO(stats *cgroups.Metrics) (read uint64, write uint64) {
     }
     return
 }
-
-// calculateAvgCPU calculates the avg CPU percentage given the previous average and the number of data points.
-func calculateAvgCPU(statsCPU float64, prevAvg float64, prevData int64) float64 {
-    avgPer := ((prevAvg * float64(prevData)) + statsCPU) / (float64(prevData) + 1)
-    return avgPer
-}
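
The networking_machine.go change works because a zero-value http.Client falls back to http.DefaultTransport, whose Proxy field is http.ProxyFromEnvironment, so HTTP_PROXY/HTTPS_PROXY settings would divert the expose/unexpose requests away from gvproxy. The sketch below is a minimal illustration of that difference, not Podman's code; the endpoint URL is a made-up placeholder.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Zero-value client: uses http.DefaultTransport, which honors
	// HTTP_PROXY/HTTPS_PROXY via http.ProxyFromEnvironment.
	proxied := &http.Client{}
	_ = proxied // kept only for comparison

	// Explicit transport with Proxy set to nil: the environment is never
	// consulted, so requests go straight to the target host.
	direct := &http.Client{
		Transport: &http.Transport{
			Proxy:                 nil, // disable proxy lookup entirely
			MaxIdleConns:          50,
			IdleConnTimeout:       30 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		},
	}

	// Hypothetical endpoint standing in for the machine gateway URL.
	resp, err := direct.Post("http://192.168.127.1:7777/services/forwarder/expose",
		"application/json", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}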
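The readConmonPipeData change relies on the fact that bufio.Reader.ReadBytes returns both the data read so far and io.EOF when the input ends without the delimiter, so treating every error as fatal could throw away a valid JSON payload. A minimal sketch of that behavior, with a made-up payload:

package main

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Simulated pipe content: valid JSON but no trailing newline,
	// so ReadBytes('\n') reports io.EOF along with the data.
	pipe := strings.NewReader(`{"data": 42}`)

	rdr := bufio.NewReader(pipe)
	b, err := rdr.ReadBytes('\n')
	if err != nil && !errors.Is(err, io.EOF) {
		fmt.Println("read error:", err)
		return
	}

	var msg struct {
		Data int `json:"data"`
	}
	if err := json.Unmarshal(b, &msg); err != nil {
		// Include the raw bytes so a garbled message is debuggable.
		fmt.Printf("unmarshal failed, raw bytes %q: %v\n", string(b), err)
		return
	}
	fmt.Println("decoded:", msg.Data) // decoded: 42
}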
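The stats.go change drops the running average over discrete samples (the deleted calculateAvgCPU and DataPoints field) in favor of a percentage computed over the container's whole runtime, with the start time standing in as the previous sample. The following standalone sketch shows the same arithmetic with invented numbers; cpuPercent here is a hypothetical helper, not Podman's calculateCPUPercent.

package main

import (
	"fmt"
	"time"
)

// cpuPercent computes how much of one CPU was used between two samples:
// the delta of total CPU time consumed divided by the wall-clock delta.
func cpuPercent(cpuNanos, prevCPUNanos, nowNanos, prevNanos uint64) float64 {
	cpuDelta := float64(cpuNanos - prevCPUNanos)
	systemDelta := float64(nowNanos - prevNanos)
	if cpuDelta > 0 && systemDelta > 0 {
		return cpuDelta / systemDelta * 100
	}
	return 0
}

func main() {
	started := time.Now().Add(-10 * time.Minute) // hypothetical container start
	now := time.Now()

	// Pretend cgroup accounting says the container burned 90s of CPU in total.
	totalCPU := uint64((90 * time.Second).Nanoseconds())

	// Average over the whole runtime: previous CPU usage is zero and the
	// previous timestamp is the start time, mirroring the new AvgCPU call.
	avg := cpuPercent(totalCPU, 0, uint64(now.UnixNano()), uint64(started.UnixNano()))
	fmt.Printf("average CPU over container lifetime: %.1f%%\n", avg) // ~15%
}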