Diffstat (limited to 'libpod/oci_conmon_linux.go')
-rw-r--r--  libpod/oci_conmon_linux.go  89
1 file changed, 54 insertions(+), 35 deletions(-)
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index a5530e448..ce888c690 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -20,8 +20,8 @@ import (
"text/template"
"time"
+ "github.com/containers/common/pkg/config"
conmonConfig "github.com/containers/conmon/runner/config"
- "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/errorhandling"
@@ -80,13 +80,13 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime.name = name
runtime.conmonPath = conmonPath
- runtime.conmonEnv = runtimeCfg.ConmonEnvVars
- runtime.cgroupManager = runtimeCfg.CgroupManager
- runtime.tmpDir = runtimeCfg.TmpDir
- runtime.logSizeMax = runtimeCfg.MaxLogSize
- runtime.noPivot = runtimeCfg.NoPivotRoot
- runtime.reservePorts = runtimeCfg.EnablePortReservation
- runtime.sdNotify = runtimeCfg.SDNotify
+ runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars
+ runtime.cgroupManager = runtimeCfg.Engine.CgroupManager
+ runtime.tmpDir = runtimeCfg.Engine.TmpDir
+ runtime.logSizeMax = runtimeCfg.Containers.LogSizeMax
+ runtime.noPivot = runtimeCfg.Engine.NoPivotRoot
+ runtime.reservePorts = runtimeCfg.Engine.EnablePortReservation
+ runtime.sdNotify = runtimeCfg.Engine.SDNotify
// TODO: probe OCI runtime for feature and enable automatically if
// available.
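
The hunk above moves runtime setup off libpod's internal config struct and onto the shared containers/common config, where engine-level settings (conmon environment, cgroup manager, tmp dir, pivot-root and port-reservation behaviour, sd-notify) live under Engine and container defaults such as the log size cap live under Containers. A minimal, illustrative sketch of reading those same fields through that package; config.Default() as the entry point is an assumption on my part, not something this diff shows:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Load the merged containers.conf (defaults plus any system/user overrides).
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}

	// Engine-level knobs hang off cfg.Engine, container defaults off cfg.Containers,
	// mirroring the field paths used in newConmonOCIRuntime above.
	fmt.Println("cgroup manager:", cfg.Engine.CgroupManager)
	fmt.Println("tmp dir:", cfg.Engine.TmpDir)
	fmt.Println("max log size:", cfg.Containers.LogSizeMax)
}
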
@@ -127,7 +127,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket")
- if runtime.cgroupManager != define.CgroupfsCgroupsManager && runtime.cgroupManager != define.SystemdCgroupsManager {
+ if runtime.cgroupManager != config.CgroupfsCgroupsManager && runtime.cgroupManager != config.SystemdCgroupsManager {
return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager)
}
@@ -177,7 +177,7 @@ func hasCurrentUserMapped(ctr *Container) bool {
// CreateContainer creates a container.
func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
if !hasCurrentUserMapped(ctr) {
- for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.VolumePath} {
+ for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} {
if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
return err
}
@@ -519,7 +519,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf
logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath)
- detachString := define.DefaultDetachKeys
+ detachString := ctr.runtime.config.Engine.DetachKeys
if detachKeys != nil {
detachString = *detachKeys
}
@@ -769,47 +769,67 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
- attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe)
+ attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe)
close(attachChan)
}()
attachToExecCalled = true
+ if err := execCmd.Wait(); err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ }
+
pid, err := readConmonPipeData(parentSyncPipe, ociLog)
return pid, attachChan, err
}
+// ExecAttachResize resizes the TTY of the given exec session.
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
+ // TODO: probably want a dedicated function to get ctl file path?
+ controlPath := filepath.Join(ctr.execBundlePath(sessionID), "ctl")
+ controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
+ if err != nil {
+ return errors.Wrapf(err, "could not open ctl file for terminal resize for container %s exec session %s", ctr.ID(), sessionID)
+ }
+ defer controlFile.Close()
+
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
+ return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ }
+
+ return nil
+}
+
// ExecStopContainer stops a given exec session in a running container.
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
- session, ok := ctr.state.ExecSessions[sessionID]
- if !ok {
- // TODO This should probably be a separate error
- return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return err
}
logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(session.PID, 0); err != nil {
+ if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
+ return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
if timeout > 0 {
// Use SIGTERM by default, then SIGSTOP after timeout.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID())
- if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGTERM); err != nil {
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID)
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
- if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil {
+ if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
} else {
// No error, container is dead
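
The new ExecAttachResize above resizes an exec session's terminal by writing a control message to conmon's per-session ctl FIFO in the form "1 <height> <width>\n", where message type 1 means resize. A standalone sketch of that wire format follows; the ctl path used here is hypothetical, whereas the code above derives it from ctr.execBundlePath(sessionID):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical ctl FIFO path; the real one is <exec bundle dir>/ctl.
	ctlPath := "/run/libpod/exec-bundle/ctl"

	f, err := os.OpenFile(ctlPath, unix.O_WRONLY, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// "1 30 120": message type 1 (resize), then rows, then columns.
	if _, err := fmt.Fprintf(f, "%d %d %d\n", 1, 30, 120); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Callers inside libpod go through ExecAttachResize with a remotecommand.TerminalSize rather than touching the FIFO directly.
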
@@ -818,17 +838,17 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
}
// SIGTERM did not work. On to SIGKILL.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID())
- if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGTERM); err != nil {
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID)
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
- if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil {
- return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID)
+ if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
+ return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
}
return nil
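
Together, the two hunks above stop an exec session by resolving its PID via getExecSessionPID and escalating from SIGTERM to SIGKILL once the caller-supplied timeout expires (note that the "+" line under the SIGKILL log message still passes unix.SIGTERM to unix.Kill, so the escalation as written re-sends SIGTERM). A self-contained sketch of the intended escalation pattern, with a hypothetical waitForExit helper standing in for waitPidStop:

package stopper

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// waitForExit is a hypothetical stand-in for waitPidStop: poll the PID with
// signal 0 until it disappears or the timeout elapses.
func waitForExit(pid int, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err := unix.Kill(pid, 0); err == unix.ESRCH {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for PID %d to exit", pid)
}

// StopProcess asks a process to exit with SIGTERM, then escalates to SIGKILL
// if it is still alive after the timeout. ESRCH ("no such process") is treated
// as success, matching the handling in the diff above.
func StopProcess(pid int, timeout time.Duration) error {
	if err := unix.Kill(pid, unix.SIGTERM); err != nil {
		if err == unix.ESRCH {
			return nil
		}
		return err
	}
	if err := waitForExit(pid, timeout); err == nil {
		return nil
	}
	if err := unix.Kill(pid, unix.SIGKILL); err != nil && err != unix.ESRCH {
		return err
	}
	return waitForExit(pid, timeout)
}
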
@@ -836,21 +856,20 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
// ExecUpdateStatus checks if the given exec session is still running.
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
- session, ok := ctr.state.ExecSessions[sessionID]
- if !ok {
- // TODO This should probably be a separate error
- return false, errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return false, err
}
logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(session.PID, 0); err != nil {
+ if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return false, nil
}
- return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
+ return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
return true, nil
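
ExecUpdateStatus now shares the same liveness check as ExecStopContainer: look up the session PID, send signal 0, and treat ESRCH as "session has exited". Isolated as a hypothetical helper (the name isAlive is not from the diff):

package procutil

import "golang.org/x/sys/unix"

// isAlive reports whether a process with the given PID still exists, using
// the same signal-0 probe as ExecUpdateStatus above.
func isAlive(pid int) (bool, error) {
	if err := unix.Kill(pid, 0); err != nil {
		if err == unix.ESRCH {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

One caveat with this probe: EPERM also means the PID exists (the caller just may not signal it), and a zombie that has not yet been reaped still answers signal 0, so "alive" here really means "PID still present".
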
@@ -1346,7 +1365,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o
func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logTag string) []string {
// set the conmon API version to be able to use the correct sync struct keys
args := []string{"--api-version", "1"}
- if r.cgroupManager == define.SystemdCgroupsManager && !ctr.config.NoCgroups {
+ if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups {
args = append(args, "-s")
}
args = append(args, "-c", ctr.ID())
@@ -1464,7 +1483,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
if mustCreateCgroup {
cgroupParent := ctr.CgroupParent()
- if r.cgroupManager == define.SystemdCgroupsManager {
+ if r.cgroupManager == config.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
realCgroupParent := cgroupParent