Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/container_exec.go           | 102
-rw-r--r-- | libpod/container_internal.go       |   2
-rw-r--r-- | libpod/container_internal_linux.go |   2
-rw-r--r-- | libpod/container_internal_test.go  |   2
-rw-r--r-- | libpod/define/healthchecks.go      |  36
-rw-r--r-- | libpod/healthcheck.go              |  65
-rw-r--r-- | libpod/oci.go                      |  15
-rw-r--r-- | libpod/oci_conmon_exec_linux.go    | 599
-rw-r--r-- | libpod/oci_conmon_linux.go         | 527
-rw-r--r-- | libpod/oci_missing.go              |   5
-rw-r--r-- | libpod/runtime_ctr.go              |   2
11 files changed, 761 insertions, 596 deletions
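The bulk of this change implements detached exec: ExecConfig gains an ExitCommand, ExecStart is now implemented (returning as soon as conmon reports the session's PID instead of ErrNotImplemented), and the conmon exec plumbing moves from oci_conmon_linux.go into the new oci_conmon_exec_linux.go. Below is a minimal sketch of how a caller might drive the new API; it assumes ctr is a running *libpod.Container, and the exit-command path is purely illustrative, since libpod appends the exec session ID and container ID to whatever command is given.

// Sketch only; assumes ctr is a running *libpod.Container.
package execdemo

import (
	"fmt"

	"github.com/containers/libpod/libpod"
)

func startDetachedExec(ctr *libpod.Container) (string, error) {
	cfg := new(libpod.ExecConfig)
	cfg.Command = []string{"/bin/sh", "-c", "sleep 30"}
	// Illustrative exit command: libpod appends the exec session ID and
	// the container ID as the final two arguments before it is run.
	cfg.ExitCommand = []string{"/usr/local/bin/exec-cleanup"}

	sessionID, err := ctr.ExecCreate(cfg)
	if err != nil {
		return "", err
	}
	// ExecStart returns once conmon reports the exec session's PID,
	// rather than blocking until the session exits.
	if err := ctr.ExecStart(sessionID); err != nil {
		return "", err
	}
	fmt.Printf("started exec session %s\n", sessionID)
	return sessionID, nil
}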
diff --git a/libpod/container_exec.go b/libpod/container_exec.go index 6ad767b4b..f2943b73c 100644 --- a/libpod/container_exec.go +++ b/libpod/container_exec.go @@ -62,6 +62,13 @@ type ExecConfig struct { // given is the number that will be passed into the exec session, // starting at 3. PreserveFDs uint `json:"preserveFds,omitempty"` + // ExitCommand is the exec session's exit command. + // This command will be executed when the exec session exits. + // If unset, no command will be executed. + // Two arguments will be appended to the exit command by Libpod: + // The ID of the exec session, and the ID of the container the exec + // session is a part of (in that order). + ExitCommand []string `json:"exitCommand,omitempty"` } // ExecSession contains information on a single exec session attached to a given @@ -191,6 +198,10 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) { return "", errors.Wrapf(err, "error copying exec configuration into exec session") } + if len(session.Config.ExitCommand) > 0 { + session.Config.ExitCommand = append(session.Config.ExitCommand, []string{session.ID(), c.ID()}...) + } + if c.state.ExecSessions == nil { c.state.ExecSessions = make(map[string]*ExecSession) } @@ -210,11 +221,52 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) { } // ExecStart starts an exec session in the container, but does not attach to it. -// Returns immediately upon starting the exec session. +// Returns immediately upon starting the exec session, unlike other ExecStart +// functions, which will only return when the exec session exits. func (c *Container) ExecStart(sessionID string) error { - // Will be implemented in part 2, migrating Start and implementing - // detached Start. - return define.ErrNotImplemented + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + // Verify that we are in a good state to continue + if !c.ensureState(define.ContainerStateRunning) { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running") + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateCreated { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) + + opts, err := prepareForExec(c, session) + if err != nil { + return err + } + + pid, err := c.ociRuntime.ExecContainerDetached(c, session.ID(), opts, session.Config.AttachStdin) + if err != nil { + return err + } + + c.newContainerEvent(events.Exec) + logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID()) + + // Update and save session to reflect PID/running + session.PID = pid + session.State = define.ExecStateRunning + + return c.save() } // ExecStartAndAttach starts and attaches to an exec session in a container. @@ -511,7 +563,27 @@ func (c *Container) ExecCleanup(sessionID string) error { } if session.State == define.ExecStateRunning { - return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID()) + // Check if the exec session is still running. 
+ alive, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) + if err != nil { + return err + } + + if alive { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID()) + } + + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + return err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + if err := c.save(); err != nil { + return err + } } logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID()) @@ -541,11 +613,11 @@ func (c *Container) ExecRemove(sessionID string, force bool) error { // Update status of exec session if running, so we cna check if it // stopped in the meantime. if session.State == define.ExecStateRunning { - stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) + running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) if err != nil { return err } - if stopped { + if !running { session.State = define.ExecStateStopped // TODO: should we retrieve exit code here? // TODO: Might be worth saving state here. @@ -800,13 +872,6 @@ func (c *Container) getActiveExecSessions() ([]string, error) { continue } if !alive { - if err := c.cleanupExecBundle(id); err != nil { - if lastErr != nil { - logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) - } - lastErr = err - } - _, isLegacy := c.state.LegacyExecSessions[id] if isLegacy { delete(c.state.LegacyExecSessions, id) @@ -826,6 +891,12 @@ func (c *Container) getActiveExecSessions() ([]string, error) { needSave = true } + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } } else { activeSessions = append(activeSessions, id) } @@ -846,6 +917,8 @@ func (c *Container) getActiveExecSessions() ([]string, error) { func (c *Container) removeAllExecSessions() error { knownSessions := c.getKnownExecSessions() + logrus.Debugf("Removing all exec sessions for container %s", c.ID()) + var lastErr error for _, id := range knownSessions { if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil { @@ -910,6 +983,7 @@ func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) { opts.User = user opts.PreserveFDs = session.Config.PreserveFDs opts.DetachKeys = session.Config.DetachKeys + opts.ExitCommand = session.Config.ExitCommand return opts, nil } diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 909ad9851..43e873bd6 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1169,7 +1169,7 @@ func (c *Container) start() error { c.state.State = define.ContainerStateRunning if c.config.HealthCheckConfig != nil { - if err := c.updateHealthStatus(HealthCheckStarting); err != nil { + if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil { logrus.Error(err) } if err := c.startTimer(); err != nil { diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 8ee0fb456..2bd6099f0 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -1236,7 +1236,7 @@ func (c *Container) makeBindMounts() error { } // Add Secret Mounts - secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) + secretMounts := 
secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.Mountpoint, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) for _, mount := range secretMounts { if _, ok := c.state.BindMounts[mount.Destination]; !ok { c.state.BindMounts[mount.Destination] = mount.Source diff --git a/libpod/container_internal_test.go b/libpod/container_internal_test.go index 5428504ef..fdf7c2e20 100644 --- a/libpod/container_internal_test.go +++ b/libpod/container_internal_test.go @@ -60,7 +60,7 @@ func TestPostDeleteHooks(t *testing.T) { t.Fatal(err) } - stateRegexp := `{"ociVersion":"1\.0\.1-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}` + stateRegexp := `{"ociVersion":"1\.0\.2-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}` for _, p := range []string{statePath, copyPath} { path := p t.Run(path, func(t *testing.T) { diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go new file mode 100644 index 000000000..4114262b6 --- /dev/null +++ b/libpod/define/healthchecks.go @@ -0,0 +1,36 @@ +package define + +const ( + // HealthCheckHealthy describes a healthy container + HealthCheckHealthy string = "healthy" + // HealthCheckUnhealthy describes an unhealthy container + HealthCheckUnhealthy string = "unhealthy" + // HealthCheckStarting describes the time between when the container starts + // and the start-period (time allowed for the container to start and application + // to be running) expires. + HealthCheckStarting string = "starting" +) + +// HealthCheckStatus represents the current state of a container +type HealthCheckStatus int + +const ( + // HealthCheckSuccess means the health worked + HealthCheckSuccess HealthCheckStatus = iota + // HealthCheckFailure means the health ran and failed + HealthCheckFailure HealthCheckStatus = iota + // HealthCheckContainerStopped means the health check cannot + // be run because the container is stopped + HealthCheckContainerStopped HealthCheckStatus = iota + // HealthCheckContainerNotFound means the container could + // not be found in local store + HealthCheckContainerNotFound HealthCheckStatus = iota + // HealthCheckNotDefined means the container has no health + // check defined in it + HealthCheckNotDefined HealthCheckStatus = iota + // HealthCheckInternalError means some something failed obtaining or running + // a given health check + HealthCheckInternalError HealthCheckStatus = iota + // HealthCheckDefined means the healthcheck was found on the container + HealthCheckDefined HealthCheckStatus = iota +) diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index aec5fa4e0..0006b7c06 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -14,43 +14,12 @@ import ( "github.com/sirupsen/logrus" ) -// HealthCheckStatus represents the current state of a container -type HealthCheckStatus int - const ( - // HealthCheckSuccess means the health worked - HealthCheckSuccess HealthCheckStatus = iota - // HealthCheckFailure means the health ran and failed - HealthCheckFailure HealthCheckStatus = iota - // HealthCheckContainerStopped means the health check cannot - // be run because the container is stopped - HealthCheckContainerStopped HealthCheckStatus = iota - // HealthCheckContainerNotFound means the container could - // not be found in local store - HealthCheckContainerNotFound 
HealthCheckStatus = iota - // HealthCheckNotDefined means the container has no health - // check defined in it - HealthCheckNotDefined HealthCheckStatus = iota - // HealthCheckInternalError means some something failed obtaining or running - // a given health check - HealthCheckInternalError HealthCheckStatus = iota - // HealthCheckDefined means the healthcheck was found on the container - HealthCheckDefined HealthCheckStatus = iota - // MaxHealthCheckNumberLogs is the maximum number of attempts we keep // in the healthcheck history file MaxHealthCheckNumberLogs int = 5 // MaxHealthCheckLogLength in characters MaxHealthCheckLogLength = 500 - - // HealthCheckHealthy describes a healthy container - HealthCheckHealthy string = "healthy" - // HealthCheckUnhealthy describes an unhealthy container - HealthCheckUnhealthy string = "unhealthy" - // HealthCheckStarting describes the time between when the container starts - // and the start-period (time allowed for the container to start and application - // to be running) expires. - HealthCheckStarting string = "starting" ) // hcWriteCloser allows us to use bufio as a WriteCloser @@ -65,10 +34,10 @@ func (hcwc hcWriteCloser) Close() error { // HealthCheck verifies the state and validity of the healthcheck configuration // on the container and then executes the healthcheck -func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) { +func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) { container, err := r.LookupContainer(name) if err != nil { - return HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name) + return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name) } hcStatus, err := checkHealthCheckCanBeRun(container) if err == nil { @@ -78,7 +47,7 @@ func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) { } // runHealthCheck runs the health check as defined by the container -func (c *Container) runHealthCheck() (HealthCheckStatus, error) { +func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) { var ( newCommand []string returnCode int @@ -87,11 +56,11 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { ) hcCommand := c.HealthCheckConfig().Test if len(hcCommand) < 1 { - return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) + return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) } switch hcCommand[0] { case "", "NONE": - return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) + return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) case "CMD": newCommand = hcCommand[1:] case "CMD-SHELL": @@ -102,7 +71,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { newCommand = hcCommand } if len(newCommand) < 1 || newCommand[0] == "" { - return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) + return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) } captureBuffer := bufio.NewWriter(&capture) hcw := hcWriteCloser{ @@ -120,13 +89,13 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID()) timeStart := time.Now() - hcResult := HealthCheckSuccess + hcResult := define.HealthCheckSuccess config 
:= new(ExecConfig) config.Command = newCommand _, hcErr := c.Exec(config, streams, nil) if hcErr != nil { errCause := errors.Cause(hcErr) - hcResult = HealthCheckFailure + hcResult = define.HealthCheckFailure if errCause == define.ErrOCIRuntimeNotFound || errCause == define.ErrOCIRuntimePermissionDenied || errCause == define.ErrOCIRuntime { @@ -154,7 +123,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout { returnCode = -1 - hcResult = HealthCheckFailure + hcResult = define.HealthCheckFailure hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String()) } hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog) @@ -164,18 +133,18 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { return hcResult, hcErr } -func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) { +func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) { cstate, err := c.State() if err != nil { - return HealthCheckInternalError, err + return define.HealthCheckInternalError, err } if cstate != define.ContainerStateRunning { - return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID()) + return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID()) } if !c.HasHealthCheck() { - return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) + return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) } - return HealthCheckDefined, nil + return define.HealthCheckDefined, nil } func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.HealthCheckLog { @@ -210,18 +179,18 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio } if hcl.ExitCode == 0 { // set status to healthy, reset failing state to 0 - healthCheck.Status = HealthCheckHealthy + healthCheck.Status = define.HealthCheckHealthy healthCheck.FailingStreak = 0 } else { if len(healthCheck.Status) < 1 { - healthCheck.Status = HealthCheckHealthy + healthCheck.Status = define.HealthCheckHealthy } if !inStartPeriod { // increment failing streak healthCheck.FailingStreak += 1 // if failing streak > retries, then status to unhealthy if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries { - healthCheck.Status = HealthCheckUnhealthy + healthCheck.Status = define.HealthCheckUnhealthy } } } diff --git a/libpod/oci.go b/libpod/oci.go index 6b1886f80..7c5218319 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -68,10 +68,10 @@ type OCIRuntime interface { AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error // ExecContainer executes a command in a running container. - // Returns an int (exit code), error channel (errors from attach), and - // error (errors that occurred attempting to start the exec session). - // This returns once the exec session is running - not once it has - // completed, as one might expect. The attach session will remain + // Returns an int (PID of exec session), error channel (errors from + // attach), and error (errors that occurred attempting to start the exec + // session). This returns once the exec session is running - not once it + // has completed, as one might expect. The attach session will remain // running, in a goroutine that will return via the chan error in the // return signature. 
ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) @@ -81,6 +81,10 @@ type OCIRuntime interface { // start, with a goroutine running in the background to handle attach). // The HTTP attach itself maintains the same invariants as HTTPAttach. ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) + // ExecContainerDetached executes a command in a running container, but + // does not attach to it. Returns the PID of the exec session and an + // error (if starting the exec session failed) + ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) // ExecAttachResize resizes the terminal of a running exec session. Only // allowed with sessions that were created with a TTY. ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error @@ -165,6 +169,9 @@ type ExecOptions struct { // If provided but set to "", detaching from the container will be // disabled. DetachKeys *string + // ExitCommand is a command that will be run after the exec session + // exits. + ExitCommand []string } // HTTPAttachStreams informs the HTTPAttach endpoint which of the container's diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go new file mode 100644 index 000000000..51819f90a --- /dev/null +++ b/libpod/oci_conmon_exec_linux.go @@ -0,0 +1,599 @@ +package libpod + +import ( + "bufio" + "fmt" + "net" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/errorhandling" + "github.com/containers/libpod/pkg/util" + "github.com/containers/libpod/utils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + "k8s.io/client-go/tools/remotecommand" +) + +// ExecContainer executes a command in a running container +func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) { + if options == nil { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") + } + if len(options.Cmd) == 0 { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") + } + + if sessionID == "" { + return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") + } + + // TODO: Should we default this to false? + // Or maybe make streams mandatory? + attachStdin := true + if streams != nil { + attachStdin = streams.AttachInput + } + + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = c.execOCILog(sessionID) + } + + execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog) + if err != nil { + return -1, nil, err + } + + // Only close sync pipe. Start and attach are consumed in the attach + // goroutine. 
+ defer func() { + if pipes.syncPipe != nil && !pipes.syncClosed { + errorhandling.CloseQuiet(pipes.syncPipe) + pipes.syncClosed = true + } + }() + + // TODO Only create if !detach + // Attach to the container before starting it + attachChan := make(chan error) + go func() { + // attachToExec is responsible for closing pipes + attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe) + close(attachChan) + }() + + if err := execCmd.Wait(); err != nil { + return -1, nil, errors.Wrapf(err, "cannot run conmon") + } + + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + + return pid, attachChan, err +} + +// ExecContainerHTTP executes a new command in an existing container and +// forwards its standard streams over an attach +func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) { + if streams != nil { + if !streams.Stdin && !streams.Stdout && !streams.Stderr { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") + } + } + + if options == nil { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP") + } + + detachString := config.DefaultDetachKeys + if options.DetachKeys != nil { + detachString = *options.DetachKeys + } + detachKeys, err := processDetachKeys(detachString) + if err != nil { + return -1, nil, err + } + + // TODO: Should we default this to false? + // Or maybe make streams mandatory? + attachStdin := true + if streams != nil { + attachStdin = streams.Stdin + } + + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = ctr.execOCILog(sessionID) + } + + execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog) + if err != nil { + return -1, nil, err + } + + // Only close sync pipe. Start and attach are consumed in the attach + // goroutine. + defer func() { + if pipes.syncPipe != nil && !pipes.syncClosed { + errorhandling.CloseQuiet(pipes.syncPipe) + pipes.syncClosed = true + } + }() + + attachChan := make(chan error) + go func() { + // attachToExec is responsible for closing pipes + attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel) + close(attachChan) + }() + + // Wait for conmon to succeed, when return. + if err := execCmd.Wait(); err != nil { + return -1, nil, errors.Wrapf(err, "cannot run conmon") + } + + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + + return pid, attachChan, err +} + +// ExecContainerDetached executes a command in a running container, but does +// not attach to it. +func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) { + if options == nil { + return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP") + } + + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = ctr.execOCILog(sessionID) + } + + execCmd, pipes, err := r.startExec(ctr, sessionID, options, stdin, ociLog) + if err != nil { + return -1, err + } + + defer func() { + pipes.cleanup() + }() + + // Wait for Conmon to tell us we're ready to attach. + // We aren't actually *going* to attach, but this means that we're good + // to proceed. 
+ if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + return -1, err + } + + // Start the exec session + if err := writeConmonPipeData(pipes.startPipe); err != nil { + return -1, err + } + + // Wait for conmon to succeed, when return. + if err := execCmd.Wait(); err != nil { + return -1, errors.Wrapf(err, "cannot run conmon") + } + + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + + return pid, err +} + +// ExecAttachResize resizes the TTY of the given exec session. +func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID)) + if err != nil { + return err + } + defer controlFile.Close() + + if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil { + return errors.Wrapf(err, "failed to write to ctl file to resize terminal") + } + + return nil +} + +// ExecStopContainer stops a given exec session in a running container. +func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error { + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return err + } + + logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID) + + // Is the session dead? + // Ping the PID with signal 0 to see if it still exists. + if err := unix.Kill(pid, 0); err != nil { + if err == unix.ESRCH { + return nil + } + return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) + } + + if timeout > 0 { + // Use SIGTERM by default, then SIGSTOP after timeout. + logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID()) + if err := unix.Kill(pid, unix.SIGTERM); err != nil { + if err == unix.ESRCH { + return nil + } + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid) + } + + // Wait for the PID to stop + if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil { + logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID) + } else { + // No error, container is dead + return nil + } + } + + // SIGTERM did not work. On to SIGKILL. + logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID()) + if err := unix.Kill(pid, unix.SIGTERM); err != nil { + if err == unix.ESRCH { + return nil + } + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid) + } + + // Wait for the PID to stop + if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil { + return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid) + } + + return nil +} + +// ExecUpdateStatus checks if the given exec session is still running. +func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) { + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return false, err + } + + logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID) + + // Is the session dead? + // Ping the PID with signal 0 to see if it still exists. 
+ if err := unix.Kill(pid, 0); err != nil { + if err == unix.ESRCH { + return false, nil + } + return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) + } + + return true, nil +} + +// ExecContainerCleanup cleans up files created when a command is run via +// ExecContainer. This includes the attach socket for the exec session. +func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error { + // Clean up the sockets dir. Issue #3962 + // Also ignore if it doesn't exist for some reason; hence the conditional return below + if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// ExecAttachSocketPath is the path to a container's exec session attach socket. +func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) { + // We don't even use container, so don't validity check it + if sessionID == "" { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path") + } + + return filepath.Join(r.socketsDir, sessionID, "attach"), nil +} + +// This contains pipes used by the exec API. +type execPipes struct { + syncPipe *os.File + syncClosed bool + startPipe *os.File + startClosed bool + attachPipe *os.File + attachClosed bool +} + +func (p *execPipes) cleanup() { + if p.syncPipe != nil && !p.syncClosed { + errorhandling.CloseQuiet(p.syncPipe) + p.syncClosed = true + } + if p.startPipe != nil && !p.startClosed { + errorhandling.CloseQuiet(p.startPipe) + p.startClosed = true + } + if p.attachPipe != nil && !p.attachClosed { + errorhandling.CloseQuiet(p.attachPipe) + p.attachClosed = true + } +} + +// Start an exec session's conmon parent from the given options. +func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) { + pipes := new(execPipes) + + if options == nil { + return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") + } + if len(options.Cmd) == 0 { + return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") + } + + if sessionID == "" { + return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") + } + + // create sync pipe to receive the pid + parentSyncPipe, childSyncPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.syncPipe = parentSyncPipe + + defer func() { + if deferredErr != nil { + pipes.cleanup() + } + }() + + // create start pipe to set the cgroup before running + // attachToExec is responsible for closing parentStartPipe + childStartPipe, parentStartPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.startPipe = parentStartPipe + + // create the attach pipe to allow attach socket to be created before + // $RUNTIME exec starts running. 
This is to make sure we can capture all output + // from the process through that socket, rather than half reading the log, half attaching to the socket + // attachToExec is responsible for closing parentAttachPipe + parentAttachPipe, childAttachPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.attachPipe = parentAttachPipe + + childrenClosed := false + defer func() { + if !childrenClosed { + errorhandling.CloseQuiet(childSyncPipe) + errorhandling.CloseQuiet(childAttachPipe) + errorhandling.CloseQuiet(childStartPipe) + } + }() + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return nil, nil, err + } + + finalEnv := make([]string, 0, len(options.Env)) + for k, v := range options.Env { + finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v)) + } + + processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID) + if err != nil { + return nil, nil, err + } + + args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "") + + if options.PreserveFDs > 0 { + args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...) + } + + for _, capability := range options.CapAdd { + args = append(args, formatRuntimeOpts("--cap", capability)...) + } + + if options.Terminal { + args = append(args, "-t") + } + + if attachStdin { + args = append(args, "-i") + } + + // Append container ID and command + args = append(args, "-e") + // TODO make this optional when we can detach + args = append(args, "--exec-attach") + args = append(args, "--exec-process-spec", processFile.Name()) + + if len(options.ExitCommand) > 0 { + args = append(args, "--exit-command", options.ExitCommand[0]) + for _, arg := range options.ExitCommand[1:] { + args = append(args, []string{"--exit-command-arg", arg}...) + } + } + + logrus.WithFields(logrus.Fields{ + "args": args, + }).Debugf("running conmon: %s", r.conmonPath) + // TODO: Need to pass this back so we can wait on it. + execCmd := exec.Command(r.conmonPath, args...) + + // TODO: This is commented because it doesn't make much sense in HTTP + // attach, and I'm not certain it does for non-HTTP attach as well. + // if streams != nil { + // // Don't add the InputStream to the execCmd. Instead, the data should be passed + // // through CopyDetachable + // if streams.AttachOutput { + // execCmd.Stdout = options.Streams.OutputStream + // } + // if streams.AttachError { + // execCmd.Stderr = options.Streams.ErrorStream + // } + // } + + conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) + if err != nil { + return nil, nil, err + } + + if options.PreserveFDs > 0 { + for fd := 3; fd < int(3+options.PreserveFDs); fd++ { + execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) + } + } + + // we don't want to step on users fds they asked to preserve + // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 + execCmd.Env = r.conmonEnv + execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) + execCmd.Env = append(execCmd.Env, conmonEnv...) 
+ + execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) + execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) + execCmd.Dir = c.execBundlePath(sessionID) + execCmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + err = startCommandGivenSelinux(execCmd) + + // We don't need children pipes on the parent side + errorhandling.CloseQuiet(childSyncPipe) + errorhandling.CloseQuiet(childAttachPipe) + errorhandling.CloseQuiet(childStartPipe) + childrenClosed = true + + if err != nil { + return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID()) + } + if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil { + return nil, nil, err + } + + if options.PreserveFDs > 0 { + for fd := 3; fd < int(3+options.PreserveFDs); fd++ { + // These fds were passed down to the runtime. Close them + // and not interfere + if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { + logrus.Debugf("unable to close file fd-%d", fd) + } + } + } + + return execCmd, pipes, nil +} + +// Attach to a container over HTTP +func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error { + if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil { + return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + } + + defer func() { + if !pipes.startClosed { + errorhandling.CloseQuiet(pipes.startPipe) + pipes.startClosed = true + } + if !pipes.attachClosed { + errorhandling.CloseQuiet(pipes.attachPipe) + pipes.attachClosed = true + } + }() + + logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID) + + // set up the socket path, such that it is the correct length and location for exec + sockPath, err := c.execAttachSocketPath(sessionID) + if err != nil { + return err + } + socketPath := buildSocketPath(sockPath) + + // 2: read from attachFd that the parent process has set up the console socket + if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + return err + } + + // 2: then attach + conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) + if err != nil { + return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath) + } + defer func() { + if err := conn.Close(); err != nil { + logrus.Errorf("unable to close socket: %q", err) + } + }() + + // Make a channel to pass errors back + errChan := make(chan error) + + attachStdout := true + attachStderr := true + attachStdin := true + if streams != nil { + attachStdout = streams.Stdout + attachStderr = streams.Stderr + attachStdin = streams.Stdin + } + + // Next, STDIN. Avoid entirely if attachStdin unset. + if attachStdin { + go func() { + logrus.Debugf("Beginning STDIN copy") + _, err := utils.CopyDetachable(conn, httpBuf, detachKeys) + logrus.Debugf("STDIN copy completed") + errChan <- err + }() + } + + // 4: send start message to child + if err := writeConmonPipeData(pipes.startPipe); err != nil { + return err + } + + // Handle STDOUT/STDERR *after* start message is sent + go func() { + var err error + if isTerminal { + // Hack: return immediately if attachStdout not set to + // emulate Docker. + // Basically, when terminal is set, STDERR goes nowhere. + // Everything does over STDOUT. + // Therefore, if not attaching STDOUT - we'll never copy + // anything from here. 
+ logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID()) + if attachStdout { + err = httpAttachTerminalCopy(conn, httpBuf, c.ID()) + } + } else { + logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID()) + err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr) + } + errChan <- err + logrus.Debugf("STDOUT/ERR copy completed") + }() + + if cancel != nil { + select { + case err := <-errChan: + return err + case <-cancel: + return nil + } + } else { + var connErr error = <-errChan + return connErr + } +} diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 7ba36fe7c..9c92b036e 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -635,229 +635,6 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.Te return nil } -// ExecContainer executes a command in a running container -func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) { - if options == nil { - return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") - } - if len(options.Cmd) == 0 { - return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") - } - - if sessionID == "" { - return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") - } - - // TODO: Should we default this to false? - // Or maybe make streams mandatory? - attachStdin := true - if streams != nil { - attachStdin = streams.AttachInput - } - - var ociLog string - if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { - ociLog = c.execOCILog(sessionID) - } - - execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog) - if err != nil { - return -1, nil, err - } - - // Only close sync pipe. Start and attach are consumed in the attach - // goroutine. 
- defer func() { - if pipes.syncPipe != nil && !pipes.syncClosed { - errorhandling.CloseQuiet(pipes.syncPipe) - pipes.syncClosed = true - } - }() - - // TODO Only create if !detach - // Attach to the container before starting it - attachChan := make(chan error) - go func() { - // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe) - close(attachChan) - }() - - if err := execCmd.Wait(); err != nil { - return -1, nil, errors.Wrapf(err, "cannot run conmon") - } - - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) - - return pid, attachChan, err -} - -// ExecContainerHTTP executes a new command in an existing container and -// forwards its standard streams over an attach -func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) { - if streams != nil { - if !streams.Stdin && !streams.Stdout && !streams.Stderr { - return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") - } - } - - if options == nil { - return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP") - } - - detachString := config.DefaultDetachKeys - if options.DetachKeys != nil { - detachString = *options.DetachKeys - } - detachKeys, err := processDetachKeys(detachString) - if err != nil { - return -1, nil, err - } - - // TODO: Should we default this to false? - // Or maybe make streams mandatory? - attachStdin := true - if streams != nil { - attachStdin = streams.Stdin - } - - var ociLog string - if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { - ociLog = ctr.execOCILog(sessionID) - } - - execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog) - if err != nil { - return -1, nil, err - } - - // Only close sync pipe. Start and attach are consumed in the attach - // goroutine. - defer func() { - if pipes.syncPipe != nil && !pipes.syncClosed { - errorhandling.CloseQuiet(pipes.syncPipe) - pipes.syncClosed = true - } - }() - - attachChan := make(chan error) - go func() { - // attachToExec is responsible for closing pipes - attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel) - close(attachChan) - }() - - // Wait for conmon to succeed, when return. - if err := execCmd.Wait(); err != nil { - return -1, nil, errors.Wrapf(err, "cannot run conmon") - } - - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) - - return pid, attachChan, err -} - -// ExecAttachResize resizes the TTY of the given exec session. -func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { - controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID)) - if err != nil { - return err - } - defer controlFile.Close() - - if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil { - return errors.Wrapf(err, "failed to write to ctl file to resize terminal") - } - - return nil -} - -// ExecStopContainer stops a given exec session in a running container. 
-func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error { - pid, err := ctr.getExecSessionPID(sessionID) - if err != nil { - return err - } - - logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID) - - // Is the session dead? - // Ping the PID with signal 0 to see if it still exists. - if err := unix.Kill(pid, 0); err != nil { - if err == unix.ESRCH { - return nil - } - return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) - } - - if timeout > 0 { - // Use SIGTERM by default, then SIGSTOP after timeout. - logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID()) - if err := unix.Kill(pid, unix.SIGTERM); err != nil { - if err == unix.ESRCH { - return nil - } - return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid) - } - - // Wait for the PID to stop - if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil { - logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID) - } else { - // No error, container is dead - return nil - } - } - - // SIGTERM did not work. On to SIGKILL. - logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID()) - if err := unix.Kill(pid, unix.SIGTERM); err != nil { - if err == unix.ESRCH { - return nil - } - return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid) - } - - // Wait for the PID to stop - if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil { - return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid) - } - - return nil -} - -// ExecUpdateStatus checks if the given exec session is still running. -func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) { - pid, err := ctr.getExecSessionPID(sessionID) - if err != nil { - return false, err - } - - logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID) - - // Is the session dead? - // Ping the PID with signal 0 to see if it still exists. - if err := unix.Kill(pid, 0); err != nil { - if err == unix.ESRCH { - return false, nil - } - return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) - } - - return true, nil -} - -// ExecContainerCleanup cleans up files created when a command is run via -// ExecContainer. This includes the attach socket for the exec session. -func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error { - // Clean up the sockets dir. Issue #3962 - // Also ignore if it doesn't exist for some reason; hence the conditional return below - if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - // CheckpointContainer checkpoints the given container. 
func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { @@ -934,16 +711,6 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) { return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil } -// ExecAttachSocketPath is the path to a container's exec session attach socket. -func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) { - // We don't even use container, so don't validity check it - if sessionID == "" { - return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path") - } - - return filepath.Join(r.socketsDir, sessionID, "attach"), nil -} - // ExitFilePath is the path to a container's exit file. func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) { if ctr == nil { @@ -1765,297 +1532,3 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, } } - -// This contains pipes used by the exec API. -type execPipes struct { - syncPipe *os.File - syncClosed bool - startPipe *os.File - startClosed bool - attachPipe *os.File - attachClosed bool -} - -func (p *execPipes) cleanup() { - if p.syncPipe != nil && !p.syncClosed { - errorhandling.CloseQuiet(p.syncPipe) - p.syncClosed = true - } - if p.startPipe != nil && !p.startClosed { - errorhandling.CloseQuiet(p.startPipe) - p.startClosed = true - } - if p.attachPipe != nil && !p.attachClosed { - errorhandling.CloseQuiet(p.attachPipe) - p.attachClosed = true - } -} - -// Start an exec session's conmon parent from the given options. -func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) { - pipes := new(execPipes) - - if options == nil { - return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") - } - if len(options.Cmd) == 0 { - return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") - } - - if sessionID == "" { - return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") - } - - // create sync pipe to receive the pid - parentSyncPipe, childSyncPipe, err := newPipe() - if err != nil { - return nil, nil, errors.Wrapf(err, "error creating socket pair") - } - pipes.syncPipe = parentSyncPipe - - defer func() { - if deferredErr != nil { - pipes.cleanup() - } - }() - - // create start pipe to set the cgroup before running - // attachToExec is responsible for closing parentStartPipe - childStartPipe, parentStartPipe, err := newPipe() - if err != nil { - return nil, nil, errors.Wrapf(err, "error creating socket pair") - } - pipes.startPipe = parentStartPipe - - // create the attach pipe to allow attach socket to be created before - // $RUNTIME exec starts running. 
This is to make sure we can capture all output - // from the process through that socket, rather than half reading the log, half attaching to the socket - // attachToExec is responsible for closing parentAttachPipe - parentAttachPipe, childAttachPipe, err := newPipe() - if err != nil { - return nil, nil, errors.Wrapf(err, "error creating socket pair") - } - pipes.attachPipe = parentAttachPipe - - childrenClosed := false - defer func() { - if !childrenClosed { - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - } - }() - - runtimeDir, err := util.GetRuntimeDir() - if err != nil { - return nil, nil, err - } - - finalEnv := make([]string, 0, len(options.Env)) - for k, v := range options.Env { - finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v)) - } - - processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID) - if err != nil { - return nil, nil, err - } - - args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "") - - if options.PreserveFDs > 0 { - args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...) - } - - for _, capability := range options.CapAdd { - args = append(args, formatRuntimeOpts("--cap", capability)...) - } - - if options.Terminal { - args = append(args, "-t") - } - - if attachStdin { - args = append(args, "-i") - } - - // Append container ID and command - args = append(args, "-e") - // TODO make this optional when we can detach - args = append(args, "--exec-attach") - args = append(args, "--exec-process-spec", processFile.Name()) - - logrus.WithFields(logrus.Fields{ - "args": args, - }).Debugf("running conmon: %s", r.conmonPath) - // TODO: Need to pass this back so we can wait on it. - execCmd := exec.Command(r.conmonPath, args...) - - // TODO: This is commented because it doesn't make much sense in HTTP - // attach, and I'm not certain it does for non-HTTP attach as well. - // if streams != nil { - // // Don't add the InputStream to the execCmd. Instead, the data should be passed - // // through CopyDetachable - // if streams.AttachOutput { - // execCmd.Stdout = options.Streams.OutputStream - // } - // if streams.AttachError { - // execCmd.Stderr = options.Streams.ErrorStream - // } - // } - - conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) - if err != nil { - return nil, nil, err - } - - if options.PreserveFDs > 0 { - for fd := 3; fd < int(3+options.PreserveFDs); fd++ { - execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) - } - } - - // we don't want to step on users fds they asked to preserve - // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 - execCmd.Env = r.conmonEnv - execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) - execCmd.Env = append(execCmd.Env, conmonEnv...) - - execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) - execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) 
- execCmd.Dir = c.execBundlePath(sessionID) - execCmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - - err = startCommandGivenSelinux(execCmd) - - // We don't need children pipes on the parent side - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - childrenClosed = true - - if err != nil { - return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID()) - } - if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil { - return nil, nil, err - } - - if options.PreserveFDs > 0 { - for fd := 3; fd < int(3+options.PreserveFDs); fd++ { - // These fds were passed down to the runtime. Close them - // and not interfere - if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { - logrus.Debugf("unable to close file fd-%d", fd) - } - } - } - - return execCmd, pipes, nil -} - -// Attach to a container over HTTP -func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error { - if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil { - return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") - } - - defer func() { - if !pipes.startClosed { - errorhandling.CloseQuiet(pipes.startPipe) - pipes.startClosed = true - } - if !pipes.attachClosed { - errorhandling.CloseQuiet(pipes.attachPipe) - pipes.attachClosed = true - } - }() - - logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID) - - // set up the socket path, such that it is the correct length and location for exec - sockPath, err := c.execAttachSocketPath(sessionID) - if err != nil { - return err - } - socketPath := buildSocketPath(sockPath) - - // 2: read from attachFd that the parent process has set up the console socket - if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { - return err - } - - // 2: then attach - conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) - if err != nil { - return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath) - } - defer func() { - if err := conn.Close(); err != nil { - logrus.Errorf("unable to close socket: %q", err) - } - }() - - // Make a channel to pass errors back - errChan := make(chan error) - - attachStdout := true - attachStderr := true - attachStdin := true - if streams != nil { - attachStdout = streams.Stdout - attachStderr = streams.Stderr - attachStdin = streams.Stdin - } - - // Next, STDIN. Avoid entirely if attachStdin unset. - if attachStdin { - go func() { - logrus.Debugf("Beginning STDIN copy") - _, err := utils.CopyDetachable(conn, httpBuf, detachKeys) - logrus.Debugf("STDIN copy completed") - errChan <- err - }() - } - - // 4: send start message to child - if err := writeConmonPipeData(pipes.startPipe); err != nil { - return err - } - - // Handle STDOUT/STDERR *after* start message is sent - go func() { - var err error - if isTerminal { - // Hack: return immediately if attachStdout not set to - // emulate Docker. - // Basically, when terminal is set, STDERR goes nowhere. - // Everything does over STDOUT. - // Therefore, if not attaching STDOUT - we'll never copy - // anything from here. 
- logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID()) - if attachStdout { - err = httpAttachTerminalCopy(conn, httpBuf, c.ID()) - } - } else { - logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID()) - err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr) - } - errChan <- err - logrus.Debugf("STDOUT/ERR copy completed") - }() - - if cancel != nil { - select { - case err := <-errChan: - return err - case <-cancel: - return nil - } - } else { - var connErr error = <-errChan - return connErr - } -} diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go index 626740f72..4da16876c 100644 --- a/libpod/oci_missing.go +++ b/libpod/oci_missing.go @@ -130,6 +130,11 @@ func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, opt return -1, nil, r.printError() } +// ExecContainerDetached is not available as the runtime is missing +func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) { + return -1, r.printError() +} + // ExecAttachResize is not available as the runtime is missing. func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { return r.printError() diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index c670822a0..655b42e51 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -390,6 +390,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } + logrus.Debugf("Removing container %s", c.ID()) + // We need to lock the pod before we lock the container. // To avoid races around removing a container and the pod it is in. // Don't need to do this in pod removal case - we're evicting the entire |
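The ExitCommand contract added in container_exec.go means the command named there is invoked with the exec session ID and the container ID appended as its last two arguments, via the --exit-command/--exit-command-arg flags that startExec now passes to conmon. A hypothetical handler (not part of this change) that only illustrates that argument order:

// Hypothetical exit-command handler, not part of this change; it only
// demonstrates the argument order documented on ExecConfig.ExitCommand.
package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "expected exec session ID and container ID")
		os.Exit(1)
	}
	sessionID := os.Args[len(os.Args)-2]
	containerID := os.Args[len(os.Args)-1]
	fmt.Printf("exec session %s in container %s exited; cleaning up\n", sessionID, containerID)
}

Because conmon itself runs the exit command, cleanup can happen even when no podman process remains attached to the exec session.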