Diffstat (limited to 'libpod/container_api.go')
 -rw-r--r--  libpod/container_api.go | 279
 1 file changed, 122 insertions(+), 157 deletions(-)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index eff5bfe5f..cd020e429 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -2,18 +2,13 @@ package libpod
import (
"context"
- "fmt"
"io"
"io/ioutil"
"os"
- "strconv"
- "sync"
"time"
- "github.com/containers/libpod/libpod/driver"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
- "github.com/containers/libpod/pkg/inspect"
- "github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/oci/caps"
"github.com/opentracing/opentracing-go"
@@ -38,10 +33,10 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
}
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
}
// don't recursively start
@@ -56,7 +51,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container
return c.reinit(ctx, false)
}
@@ -117,24 +112,27 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
if err := c.prepareToStart(ctx, recursive); err != nil {
return nil, err
}
-
attachChan := make(chan error)
// We need to ensure that we don't return until start() fired in attach.
- // Use a WaitGroup to sync this.
- wg := new(sync.WaitGroup)
- wg.Add(1)
+ // Use a channel to sync
+ startedChan := make(chan bool)
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, wg); err != nil {
+ if err := c.attach(streams, keys, resize, true, startedChan); err != nil {
attachChan <- err
}
close(attachChan)
}()
- wg.Wait()
- c.newContainerEvent(events.Attach)
+ select {
+ case err := <-attachChan:
+ return nil, err
+ case <-startedChan:
+ c.newContainerEvent(events.Attach)
+ }
+
return attachChan, nil
}
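
The hunk above replaces the WaitGroup handshake with a started channel plus a select, so a failure in attach before start() fires is returned to the caller immediately instead of blocking. A minimal, self-contained sketch of that pattern (attachWork, startedChan, and the simulated failure are illustrative, not libpod API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// attachWork stands in for Container.attach: it either fails before the
// container is started, or signals on started and then blocks for the
// lifetime of the (simulated) attach session.
func attachWork(started chan<- bool, fail bool) error {
	if fail {
		return errors.New("attach failed before start")
	}
	close(started)                    // attach is wired up; start() may now run
	time.Sleep(10 * time.Millisecond) // simulate the attach session
	return nil
}

// startAndAttach mirrors the select-based handshake introduced in the diff:
// wait for either an early attach error or the "started" signal.
func startAndAttach(fail bool) (<-chan error, error) {
	attachChan := make(chan error)
	startedChan := make(chan bool)

	go func() {
		if err := attachWork(startedChan, fail); err != nil {
			attachChan <- err
		}
		close(attachChan)
	}()

	select {
	case err := <-attachChan:
		// attach returned before it ever signalled "started"
		return nil, err
	case <-startedChan:
		// attach is ready; hand attachChan back so the caller can wait on it
	}
	return attachChan, nil
}

func main() {
	if _, err := startAndAttach(true); err != nil {
		fmt.Println("early failure:", err)
	}
	if ch, err := startAndAttach(false); err == nil && ch != nil {
		fmt.Println("attach session ended, error:", <-ch)
	}
}
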
@@ -179,15 +177,15 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
}
- if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateUnknown ||
- c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String())
+ if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateUnknown ||
+ c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
}
- if c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited {
- return ErrCtrStopped
+ if c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited {
+ return define.ErrCtrStopped
}
defer c.newContainerEvent(events.Stop)
return c.stop(timeout)
@@ -204,12 +202,12 @@ func (c *Container) Kill(signal uint) error {
}
}
- if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers")
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
defer c.newContainerEvent(events.Kill)
- if err := c.runtime.ociRuntime.killContainer(c, signal); err != nil {
+ if err := c.ociRuntime.killContainer(c, signal); err != nil {
return err
}
@@ -219,49 +217,33 @@ func (c *Container) Kill(signal uint) error {
}
// Exec starts a new process inside the container
+// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of 126 is returned.
+// If another generic error happens, an exit code of 125 is returned.
+// Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit code is the exit code of the call.
+// Otherwise, the exit code will be the exit code of the executed call inside of the container.
// TODO investigate allowing exec without attaching
-func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs int) error {
+func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) {
var capList []string
-
- locked := false
if !c.batched {
- locked = true
-
c.lock.Lock()
- defer func() {
- if locked {
- c.lock.Unlock()
- }
- }()
+ defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return err
+ return define.ExecErrorCodeCannotInvoke, err
}
}
conState := c.state.State
// TODO can probably relax this once we track exec sessions
- if conState != ContainerStateRunning {
- return errors.Errorf("cannot exec into container that is not running")
+ if conState != define.ContainerStateRunning {
+ return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
}
+
if privileged || c.config.Privileged {
capList = caps.GetAllCapabilities()
}
- // If user was set, look it up in the container to get a UID to use on
- // the host
- hostUser := ""
- if user != "" {
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, nil)
- if err != nil {
- return err
- }
-
- // runc expects user formatted as uid:gid
- hostUser = fmt.Sprintf("%d:%d", execUser.Uid, execUser.Gid)
- }
-
// Generate exec session ID
// Ensure we don't conflict with an existing session ID
sessionID := stringid.GenerateNonCryptoID()
@@ -275,52 +257,33 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
break
}
}
- if found == true {
+ if found {
sessionID = stringid.GenerateNonCryptoID()
}
}
logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID)
-
- execCmd, err := c.runtime.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs)
- if err != nil {
- return errors.Wrapf(err, "error exec %s", c.ID())
+ if err := c.createExecBundle(sessionID); err != nil {
+ return define.ExecErrorCodeCannotInvoke, err
}
- chWait := make(chan error)
- go func() {
- chWait <- execCmd.Wait()
- close(chWait)
- }()
-
- pidFile := c.execPidPath(sessionID)
- // 60 second seems a reasonable time to wait
- // https://github.com/containers/libpod/issues/1495
- // https://github.com/containers/libpod/issues/1816
- const pidWaitTimeout = 60000
- // Wait until the runtime makes the pidfile
- exited, err := WaitForFile(pidFile, chWait, pidWaitTimeout*time.Millisecond)
- if err != nil {
- if exited {
- // If the runtime exited, propagate the error we got from the process.
- return err
+ defer func() {
+ // cleanup exec bundle
+ if err := c.cleanupExecBundle(sessionID); err != nil {
+ logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err)
}
- return errors.Wrapf(err, "timed out waiting for runtime to create pidfile for exec session in container %s", c.ID())
- }
+ }()
- // Pidfile exists, read it
- contents, err := ioutil.ReadFile(pidFile)
+ pid, attachChan, err := c.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, user, sessionID, streams, preserveFDs, resize, detachKeys)
if err != nil {
- // We don't know the PID of the exec session
- // However, it may still be alive
- // TODO handle this better
- return errors.Wrapf(err, "could not read pidfile for exec session %s in container %s", sessionID, c.ID())
- }
- pid, err := strconv.ParseInt(string(contents), 10, 32)
- if err != nil {
- // As above, we don't have a valid PID, but the exec session is likely still alive
- // TODO handle this better
- return errors.Wrapf(err, "error parsing PID of exec session %s in container %s", sessionID, c.ID())
+ ec := define.ExecErrorCodeGeneric
+ // Conmon will pass a non-zero exit code from the runtime as a pid here.
+ // we differentiate a pid with an exit code by sending it as negative, so reverse
+ // that change and return the exit code the runtime failed with.
+ if pid < 0 {
+ ec = -1 * pid
+ }
+ return ec, err
}
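
For reference, a compact sketch of the exit-code convention described in the new Exec doc comment and applied in the pid < 0 branch above; the constant values follow that comment, but this helper is illustrative rather than libpod code:

package sketch

// Exit codes follow the Exec doc comment above: 126 when Exec could not be
// invoked at all, 125 for other generic failures.
const (
	execErrorCodeCannotInvoke = 126
	execErrorCodeGeneric      = 125
)

// execErrorCode recovers the code to report when execContainer fails:
// conmon encodes a runtime failure's exit status as a negative pid, so a
// negative pid is flipped back into that status; anything else maps to the
// generic error code.
func execErrorCode(pid int) int {
	if pid < 0 {
		return -pid
	}
	return execErrorCodeGeneric
}
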
// We have the PID, add it to state
@@ -330,12 +293,12 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
session := new(ExecSession)
session.ID = sessionID
session.Command = cmd
- session.PID = int(pid)
+ session.PID = pid
c.state.ExecSessions[sessionID] = session
if err := c.save(); err != nil {
// Now we have a PID but we can't save it in the DB
// TODO handle this better
- return errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
+ return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
}
c.newContainerEvent(events.Exec)
logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID())
@@ -343,23 +306,33 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
// Unlock so other processes can use the container
if !c.batched {
c.lock.Unlock()
- locked = false
}
- var waitErr error
- if !exited {
- waitErr = <-chWait
+ lastErr := <-attachChan
+
+ exitCode, err := c.readExecExitCode(sessionID)
+ if err != nil {
+ if lastErr != nil {
+ logrus.Errorf(lastErr.Error())
+ }
+ lastErr = err
+ }
+ if exitCode != 0 {
+ if lastErr != nil {
+ logrus.Errorf(lastErr.Error())
+ }
+ lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode)
}
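
Both blocks above follow a small "latest error wins, log the rest" policy when combining the attach error with the exit-code result. A standalone sketch of that policy (the helper name is hypothetical):

package sketch

import "github.com/sirupsen/logrus"

// collectErr keeps the most recent error and logs, rather than silently
// drops, any earlier one -- mirroring how the Exec changes merge the attach
// error with the exit-code read. Illustrative helper, not libpod API.
func collectErr(lastErr, newErr error) error {
	if newErr == nil {
		return lastErr
	}
	if lastErr != nil {
		logrus.Errorf(lastErr.Error())
	}
	return newErr
}
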
// Lock again
if !c.batched {
- locked = true
c.lock.Lock()
}
// Sync the container again to pick up changes in state
if err := c.syncContainer(); err != nil {
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ return exitCode, lastErr
}
// Remove the exec session from state
@@ -367,7 +340,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
if err := c.save(); err != nil {
logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err)
}
- return waitErr
+ return exitCode, lastErr
}
// AttachStreams contains streams that will be attached to the container
@@ -400,10 +373,10 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
c.lock.Unlock()
}
- if c.state.State != ContainerStateCreated &&
- c.state.State != ContainerStateRunning &&
- c.state.State != ContainerStateExited {
- return errors.Wrapf(ErrCtrStateInvalid, "can only attach to created or running containers")
+ if c.state.State != define.ContainerStateCreated &&
+ c.state.State != define.ContainerStateRunning &&
+ c.state.State != define.ContainerStateExited {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
}
defer c.newContainerEvent(events.Attach)
return c.attach(streams, keys, resize, false, nil)
@@ -441,13 +414,13 @@ func (c *Container) Unmount(force bool) error {
return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
}
if mounted == 1 {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
}
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
}
- return errors.Wrapf(ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
+ return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
}
}
defer c.newContainerEvent(events.Unmount)
@@ -465,11 +438,11 @@ func (c *Container) Pause() error {
}
}
- if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is already paused", c.ID())
+ if c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
}
- if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
}
defer c.newContainerEvent(events.Pause)
return c.pause()
@@ -486,8 +459,8 @@ func (c *Container) Unpause() error {
}
}
- if c.state.State != ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
+ if c.state.State != define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
}
defer c.newContainerEvent(events.Unpause)
return c.unpause()
@@ -511,7 +484,7 @@ func (c *Container) Export(path string) error {
// AddArtifact creates and writes to an artifact file for the container
func (c *Container) AddArtifact(name string, data []byte) error {
if !c.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
@@ -520,7 +493,7 @@ func (c *Container) AddArtifact(name string, data []byte) error {
// GetArtifact reads the specified artifact file from the container
func (c *Container) GetArtifact(name string) ([]byte, error) {
if !c.valid {
- return nil, ErrCtrRemoved
+ return nil, define.ErrCtrRemoved
}
return ioutil.ReadFile(c.getArtifactPath(name))
@@ -529,38 +502,12 @@ func (c *Container) GetArtifact(name string) ([]byte, error) {
// RemoveArtifact deletes the specified artifacts file
func (c *Container) RemoveArtifact(name string) error {
if !c.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
return os.Remove(c.getArtifactPath(name))
}
-// Inspect a container for low-level information
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if err := c.syncContainer(); err != nil {
- return nil, err
- }
- }
-
- storeCtr, err := c.runtime.store.Container(c.ID())
- if err != nil {
- return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
- }
- layer, err := c.runtime.store.Layer(storeCtr.LayerID)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
- }
- driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
- if err != nil {
- return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
- }
- return c.getContainerInspectData(size, driverData)
-}
-
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait() (int32, error) {
return c.WaitWithInterval(DefaultWaitInterval)
@@ -570,7 +517,7 @@ func (c *Container) Wait() (int32, error) {
// code. The argument is the interval at which checks the container's status.
func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) {
if !c.valid {
- return -1, ErrCtrRemoved
+ return -1, define.ErrCtrRemoved
}
err := wait.PollImmediateInfinite(waitTimeout,
func() (bool, error) {
@@ -605,8 +552,8 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
// Handle restart policy.
@@ -624,7 +571,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
// Check if we have active exec sessions
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
}
defer c.newContainerEvent(events.Cleanup)
return c.cleanup(ctx)
@@ -655,6 +602,7 @@ func (c *Container) Batch(batchFunc func(*Container) error) error {
newCtr.config = c.config
newCtr.state = c.state
newCtr.runtime = c.runtime
+ newCtr.ociRuntime = c.ociRuntime
newCtr.lock = c.lock
newCtr.valid = true
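
The added line ensures the batched copy also carries the container's per-container OCI runtime handle. For context, Batch runs a caller-supplied function against an unlocked copy so several reads happen under one lock; a hypothetical caller (the accessors used inside are assumptions, only Batch's signature comes from this diff):

package sketch

import (
	"fmt"

	"github.com/containers/libpod/libpod"
)

// inspectBatched reads a few fields under a single container lock via Batch.
// The State and ID accessors are assumed for illustration.
func inspectBatched(ctr *libpod.Container) error {
	return ctr.Batch(func(c *libpod.Container) error {
		state, err := c.State()
		if err != nil {
			return err
		}
		fmt.Printf("container %s is %s\n", c.ID(), state.String())
		return nil
	})
}
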
@@ -682,11 +630,11 @@ func (c *Container) Sync() error {
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
- if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
}
// Only save back to DB if state changed
@@ -713,27 +661,27 @@ func (c *Container) Refresh(ctx context.Context) error {
}
wasCreated := false
- if c.state.State == ContainerStateCreated {
+ if c.state.State == define.ContainerStateCreated {
wasCreated = true
}
wasRunning := false
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
wasRunning = true
}
wasPaused := false
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
wasPaused = true
}
// First, unpause the container if it's paused
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
if err := c.unpause(); err != nil {
return err
}
}
// Next, if the container is running, stop it
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(c.config.StopTimeout); err != nil {
return err
}
@@ -743,14 +691,14 @@ func (c *Container) Refresh(ctx context.Context) error {
if len(c.state.ExecSessions) > 0 {
logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.",
len(c.state.ExecSessions), c.ID())
- if err := c.runtime.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil {
+ if err := c.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil {
return err
}
}
// If the container is in ContainerStateStopped, we need to delete it
// from the runtime and clear conmon state
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
if err := c.delete(ctx); err != nil {
return err
}
@@ -815,11 +763,28 @@ type ContainerCheckpointOptions struct {
// TCPEstablished tells the API to checkpoint a container
// even if it contains established TCP connections
TCPEstablished bool
+ // TargetFile tells the API to read (or write) the checkpoint image
+ // from (or to) the filename set in TargetFile
+ TargetFile string
+ // Name tells the API that during restore from an exported
+ // checkpoint archive a new name should be used for the
+ // restored container
+ Name string
+ // IgnoreRootfs tells the API to not export changes to
+ // the container's root file-system (or to not import)
+ IgnoreRootfs bool
}
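
The new TargetFile, Name, and IgnoreRootfs fields drive checkpoint export to, and restore from, an archive. A hypothetical caller using them for export (only the option fields and the Checkpoint signature come from this diff; the helper itself is illustrative):

package sketch

import (
	"context"

	"github.com/containers/libpod/libpod"
)

// checkpointToArchive checkpoints a container and writes the checkpoint
// image to an archive on disk, using the options added in this diff.
func checkpointToArchive(ctx context.Context, ctr *libpod.Container, archive string) error {
	opts := libpod.ContainerCheckpointOptions{
		TCPEstablished: true,    // keep established TCP connections in the image
		TargetFile:     archive, // write the checkpoint image to this file
		IgnoreRootfs:   true,    // do not export root file-system changes
	}
	return ctr.Checkpoint(ctx, opts)
}
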
// Checkpoint checkpoints a container
func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
logrus.Debugf("Trying to checkpoint container %s", c.ID())
+
+ if options.TargetFile != "" {
+ if err := c.prepareCheckpointExport(); err != nil {
+ return err
+ }
+ }
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()