Diffstat (limited to 'libpod/container_api.go')
-rw-r--r--  libpod/container_api.go | 97
1 file changed, 58 insertions(+), 39 deletions(-)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index e522038a3..bc92cae69 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -10,8 +10,8 @@ import (
"time"
"github.com/containers/libpod/libpod/driver"
- "github.com/containers/libpod/pkg/chrootuser"
"github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/daemon/caps"
"github.com/pkg/errors"
@@ -39,16 +39,13 @@ func (c *Container) Init(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
}
- if err := c.prepare(); err != nil {
- return err
- }
defer func() {
if err != nil {
if err2 := c.cleanup(ctx); err2 != nil {
@@ -57,6 +54,10 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
}()
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
if c.state.State == ContainerStateStopped {
// Reinitialize the container
return c.reinit(ctx)
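
Two things change in this hunk (and identically in Start and StartAndAttach below). First, the errors.Wrapf format string had a "%s" verb but no argument, so the message would have rendered with "%!s(MISSING)" in place of the container ID. Second, c.prepare() now runs after the cleanup defer is registered, so a failed prepare (say, a half-created network namespace) is torn down on the way out instead of leaking. A minimal sketch of the named-return pattern the defer relies on, with hypothetical names:

    package main

    import (
        "errors"
        "fmt"
    )

    // prepare stands in for c.prepare(); it fails so the defer fires.
    func prepare() error { return errors.New("mount failed") }

    // Because err is a named return value, the deferred function sees
    // whatever error was returned, but only for code that runs after the
    // defer is registered; that ordering is why prepare() moved down.
    func initContainer() (err error) {
        defer func() {
            if err != nil {
                fmt.Println("cleanup runs:", err)
            }
        }()
        if err = prepare(); err != nil {
            return err
        }
        return nil
    }

    func main() { _ = initContainer() }
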
@@ -92,16 +93,13 @@ func (c *Container) Start(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
}
- if err := c.prepare(); err != nil {
- return err
- }
defer func() {
if err != nil {
if err2 := c.cleanup(ctx); err2 != nil {
@@ -110,6 +108,10 @@ func (c *Container) Start(ctx context.Context) (err error) {
}
}()
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
if c.state.State == ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx); err != nil {
@@ -157,16 +159,13 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return nil, errors.Wrapf(err, "error checking dependencies for container %s")
+ return nil, errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
return nil, errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
}
- if err := c.prepare(); err != nil {
- return nil, err
- }
defer func() {
if err != nil {
if err2 := c.cleanup(ctx); err2 != nil {
@@ -175,6 +174,10 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
}
}()
+ if err := c.prepare(); err != nil {
+ return nil, err
+ }
+
if c.state.State == ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx); err != nil {
@@ -292,13 +295,13 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
// the host
hostUser := ""
if user != "" {
- uid, gid, err := chrootuser.GetUser(c.state.Mountpoint, user)
+ execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, nil)
if err != nil {
- return errors.Wrapf(err, "error getting user to launch exec session as")
+ return err
}
// runc expects user formatted as uid:gid
- hostUser = fmt.Sprintf("%d:%d", uid, gid)
+ hostUser = fmt.Sprintf("%d:%d", execUser.Uid, execUser.Gid)
}
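
The hunk above replaces chrootuser.GetUser with the lookup package, which resolves the user string (a name, a UID, or "uid:gid") against the passwd and group files under the container's mountpoint and returns a resolved exec user. A small illustrative sketch of the formatting step, with a stand-in type since only the Uid and Gid fields matter here:

    package main

    import "fmt"

    // execUser is an illustrative stand-in for the value returned by
    // lookup.GetUserGroupInfo; the real type carries the resolved IDs.
    type execUser struct {
        Uid int
        Gid int
    }

    func main() {
        u := execUser{Uid: 1000, Gid: 1000}
        // runc expects the user formatted as uid:gid.
        hostUser := fmt.Sprintf("%d:%d", u.Uid, u.Gid)
        fmt.Println(hostUser) // "1000:1000"
    }
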
// Generate exec session ID
@@ -325,25 +328,25 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
if err != nil {
return errors.Wrapf(err, "error exec %s", c.ID())
}
+ chWait := make(chan error)
+ go func() {
+ chWait <- execCmd.Wait()
+ }()
+ defer close(chWait)
pidFile := c.execPidPath(sessionID)
- // 1 second seems a reasonable time to wait
- // See https://github.com/containers/libpod/issues/1495
- const pidWaitTimeout = 1000
+ // 60 second seems a reasonable time to wait
+ // https://github.com/containers/libpod/issues/1495
+ // https://github.com/containers/libpod/issues/1816
+ const pidWaitTimeout = 60000
// Wait until the runtime makes the pidfile
- // TODO: If runtime errors before the PID file is created, we have to
- // wait for timeout here
- if err := WaitForFile(pidFile, pidWaitTimeout*time.Millisecond); err != nil {
- logrus.Debugf("Timed out waiting for pidfile from runtime for container %s exec", c.ID())
-
- // Check if an error occurred in the process before we made a pidfile
- // TODO: Wait() here is a poor choice - is there a way to see if
- // a process has finished, instead of waiting for it to finish?
- if err := execCmd.Wait(); err != nil {
+ exited, err := WaitForFile(pidFile, chWait, pidWaitTimeout*time.Millisecond)
+ if err != nil {
+ if exited {
+ // If the runtime exited, propagate the error we got from the process.
return err
}
-
return errors.Wrapf(err, "timed out waiting for runtime to create pidfile for exec session in container %s", c.ID())
}
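
This hunk is the heart of the fix for the linked issues: the old code slept out the full timeout waiting for the pidfile even when the runtime had already died, and only checked the process afterwards. Now a goroutine publishes execCmd.Wait()'s result on chWait, and WaitForFile races pidfile creation against that channel, so a runtime that exits early is reported immediately; the raised 60-second timeout only matters when the runtime is genuinely slow. A standalone sketch of the race, assuming a simple polling loop rather than libpod's actual WaitForFile:

    package main

    import (
        "errors"
        "os"
        "time"
    )

    // waitForFileOrExit polls for path until it exists, the process's exit
    // status arrives on chWait, or the timeout elapses. The boolean reports
    // whether the process exited first.
    func waitForFileOrExit(path string, chWait <-chan error, timeout time.Duration) (bool, error) {
        tick := time.NewTicker(25 * time.Millisecond)
        defer tick.Stop()
        deadline := time.After(timeout)
        for {
            select {
            case err := <-chWait:
                return true, err // process exited before the file appeared
            case <-tick.C:
                if _, err := os.Stat(path); err == nil {
                    return false, nil // file exists; process still running
                }
            case <-deadline:
                return false, errors.New("timed out waiting for file")
            }
        }
    }

    func main() {
        chWait := make(chan error, 1)
        go func() { chWait <- errors.New("runtime exited early") }()
        exited, err := waitForFileOrExit("/nonexistent/pidfile", chWait, time.Second)
        _, _ = exited, err
    }

The later hunk that guards execCmd.Wait() behind "if !exited" exists for the same reason: chWait delivers the exit status exactly once, so if WaitForFile already received it, a second receive would block forever.
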
@@ -385,7 +388,10 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
locked = false
}
- waitErr := execCmd.Wait()
+ var waitErr error
+ if !exited {
+ waitErr = <-chWait
+ }
// Lock again
if !c.batched {
@@ -685,7 +691,7 @@ func (c *Container) Sync() error {
(c.state.State != ContainerStateConfigured) {
oldState := c.state.State
// TODO: optionally replace this with a stat for the exit file
- if err := c.runtime.ociRuntime.updateContainerStatus(c); err != nil {
+ if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
}
// Only save back to DB if state changed
@@ -712,7 +718,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -797,7 +803,7 @@ func (c *Container) Refresh(ctx context.Context) error {
return err
}
- logrus.Debugf("Successfully refresh container %s state")
+ logrus.Debugf("Successfully refresh container %s state", c.ID())
// Initialize the container if it was created in runc
if wasCreated || wasRunning || wasPaused {
@@ -826,9 +832,22 @@ func (c *Container) Refresh(ctx context.Context) error {
return nil
}
+// ContainerCheckpointOptions is a struct used to pass the parameters
+// for checkpointing (and restoring) to the corresponding functions
+type ContainerCheckpointOptions struct {
+ // Keep tells the API to not delete checkpoint artifacts
+ Keep bool
+ // KeepRunning tells the API to keep the container running
+ // after writing the checkpoint to disk
+ KeepRunning bool
+ // TCPEstablished tells the API to checkpoint a container
+ // even if it contains established TCP connections
+ TCPEstablished bool
+}
+
// Checkpoint checkpoints a container
-func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
- logrus.Debugf("Trying to checkpoint container %s", c)
+func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
+ logrus.Debugf("Trying to checkpoint container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -838,12 +857,12 @@ func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
}
}
- return c.checkpoint(ctx, keep)
+ return c.checkpoint(ctx, options)
}
// Restore restores a container
-func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
- logrus.Debugf("Trying to restore container %s", c)
+func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
+ logrus.Debugf("Trying to restore container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -853,5 +872,5 @@ func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
}
}
- return c.restore(ctx, keep)
+ return c.restore(ctx, options)
}
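
With the options struct in place of the old bare keep flag, call sites name the behavior they want, and later checkpoint options can be added without another signature change. A hypothetical call site, assuming a *Container ctr and a context ctx are in scope:

    opts := libpod.ContainerCheckpointOptions{
        Keep:           true,  // keep checkpoint artifacts on disk
        KeepRunning:    false, // stop the container after checkpointing
        TCPEstablished: true,  // permit established TCP connections
    }
    if err := ctr.Checkpoint(ctx, opts); err != nil {
        logrus.Errorf("checkpoint of %s failed: %v", ctr.ID(), err)
    }
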