Diffstat (limited to 'libpod/container_internal.go')
-rw-r--r--  libpod/container_internal.go  |  394
1 file changed, 261 insertions(+), 133 deletions(-)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b363c193a..83ee5640e 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -12,6 +12,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
@@ -21,15 +22,17 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// name of the directory holding the artifacts
- artifactsDir = "artifacts"
+ artifactsDir = "artifacts"
+ execDirPermission = 0755
)
// rootFsSize gets the size of the container's root filesystem
@@ -127,17 +130,94 @@ func (c *Container) CheckpointPath() string {
// AttachSocketPath retrieves the path of the container's attach socket
func (c *Container) AttachSocketPath() string {
- return filepath.Join(c.runtime.ociRuntime.socketsDir, c.ID(), "attach")
+ return filepath.Join(c.ociRuntime.socketsDir, c.ID(), "attach")
+}
+
+// exitFilePath gets the path to the container's exit file
+func (c *Container) exitFilePath() string {
+ return filepath.Join(c.ociRuntime.exitsDir, c.ID())
+}
+
+// create a bundle path and associated files for an exec session
+func (c *Container) createExecBundle(sessionID string) (err error) {
+ bundlePath := c.execBundlePath(sessionID)
+ if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil {
+ return createErr
+ }
+ defer func() {
+ if err != nil {
+ if err2 := os.RemoveAll(bundlePath); err2 != nil {
+ logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2)
+ }
+ }
+ }()
+ if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil {
+ // The directory is allowed to exist
+ if !os.IsExist(err2) {
+ err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
+ }
+ }
+ return
+}
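
The deferred cleanup in createExecBundle relies on a named error return so the deferred closure can see whether the function ultimately failed, and undo partial work if so. A minimal sketch of the same idiom, with hypothetical names and assuming the file's existing os, path/filepath, and logrus imports:

    // setupDirs creates a base directory plus a subdirectory, removing the
    // base again if a later step fails. The named return value err is what
    // the deferred closure inspects.
    func setupDirs(base string) (err error) {
    	if err = os.MkdirAll(base, 0755); err != nil {
    		return err
    	}
    	defer func() {
    		if err != nil {
    			if err2 := os.RemoveAll(base); err2 != nil {
    				logrus.Warnf("cleanup after failed setup also failed: %v", err2)
    			}
    		}
    	}()
    	err = os.MkdirAll(filepath.Join(base, "exit"), 0755)
    	return err
    }
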
+
+// cleanup an exec session after it is done
+func (c *Container) cleanupExecBundle(sessionID string) error {
+ return os.RemoveAll(c.execBundlePath(sessionID))
+}
+
+// the path to a container's exec session bundle
+func (c *Container) execBundlePath(sessionID string) string {
+ return filepath.Join(c.bundlePath(), sessionID)
}
// Get PID file path for a container's exec session
func (c *Container) execPidPath(sessionID string) string {
- return filepath.Join(c.state.RunDir, "exec_pid_"+sessionID)
+ return filepath.Join(c.execBundlePath(sessionID), "exec_pid")
}
-// exitFilePath gets the path to the container's exit file
-func (c *Container) exitFilePath() string {
- return filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
+// the log path for an exec session
+func (c *Container) execLogPath(sessionID string) string {
+ return filepath.Join(c.execBundlePath(sessionID), "exec_log")
+}
+
+// the socket conmon creates for an exec session
+func (c *Container) execAttachSocketPath(sessionID string) string {
+ return filepath.Join(c.ociRuntime.socketsDir, sessionID, "attach")
+}
+
+// execExitFileDir gets the path to the directory holding the exec session's exit file
+func (c *Container) execExitFileDir(sessionID string) string {
+ return filepath.Join(c.execBundlePath(sessionID), "exit")
+}
+
+// execOCILog returns the file path for the exec session's OCI log
+func (c *Container) execOCILog(sessionID string) string {
+ if !c.ociRuntime.supportsJSON {
+ return ""
+ }
+ return filepath.Join(c.execBundlePath(sessionID), "oci-log")
+}
+
+// readExecExitCode reads the exit file for an exec session and returns
+// the exit code
+func (c *Container) readExecExitCode(sessionID string) (int, error) {
+ exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID())
+ chWait := make(chan error)
+ defer close(chWait)
+
+ _, err := WaitForFile(exitFile, chWait, time.Second*5)
+ if err != nil {
+ return -1, err
+ }
+ ec, err := ioutil.ReadFile(exitFile)
+ if err != nil {
+ return -1, err
+ }
+ ecInt, err := strconv.Atoi(string(ec))
+ if err != nil {
+ return -1, err
+ }
+ return ecInt, nil
}
// Wait for the container's exit file to appear.
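
readExecExitCode above waits on libpod's WaitForFile helper before parsing the file conmon wrote. As a rough, self-contained illustration of the same flow, here is a hedged sketch that simply polls for the exit file (WaitForFile itself also watches an inotify-backed channel); all names here are hypothetical:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    	"strconv"
    	"strings"
    	"time"
    )

    // pollExitCode waits, by polling, for the runtime to write the exit
    // file, then parses the integer exit status it contains.
    func pollExitCode(exitFile string, timeout time.Duration) (int, error) {
    	deadline := time.Now().Add(timeout)
    	for {
    		_, err := os.Stat(exitFile)
    		if err == nil {
    			break
    		}
    		if !os.IsNotExist(err) {
    			return -1, err
    		}
    		if time.Now().After(deadline) {
    			return -1, fmt.Errorf("timed out waiting for exit file %s", exitFile)
    		}
    		time.Sleep(25 * time.Millisecond)
    	}
    	data, err := ioutil.ReadFile(exitFile)
    	if err != nil {
    		return -1, err
    	}
    	return strconv.Atoi(strings.TrimSpace(string(data)))
    }

    func main() {
    	code, err := pollExitCode("/tmp/ctr-exit-file", 5*time.Second)
    	if err != nil {
    		fmt.Println("error:", err)
    		return
    	}
    	fmt.Println("exit code:", code)
    }
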
@@ -154,7 +234,7 @@ func (c *Container) waitForExitFileAndSync() error {
// Reset our state
c.state.ExitCode = -1
c.state.FinishedTime = time.Now()
- c.state.State = ContainerStateStopped
+ c.state.State = define.ContainerStateStopped
if err2 := c.save(); err2 != nil {
logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
@@ -163,7 +243,7 @@ func (c *Container) waitForExitFileAndSync() error {
return err
}
- if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
return err
}
@@ -239,10 +319,10 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
// Is the container running again?
// If so, we don't have to do anything
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return false, nil
- } else if c.state.State == ContainerStateUnknown {
- return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
+ } else if c.state.State == define.ContainerStateUnknown {
+ return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
}
c.newContainerEvent(events.Restart)
@@ -265,13 +345,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
return false, err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
return false, err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, true); err != nil {
return false, err
@@ -293,20 +373,20 @@ func (c *Container) syncContainer() error {
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
// TODO: optionally replace this with a stat for the exit file
- if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
return err
}
// Only save back to DB if state changed
if c.state.State != oldState {
// Check for a restart policy match
if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo &&
- (oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
- (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
+ (oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) &&
+ (c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) &&
!c.state.StoppedByUser {
c.state.RestartPolicyMatch = true
}
@@ -318,7 +398,7 @@ func (c *Container) syncContainer() error {
}
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
return nil
@@ -331,16 +411,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
defer span.Finish()
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
- if c.state.State != ContainerStateConfigured {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+ if c.state.State != define.ContainerStateConfigured {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
}
// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
- return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image")
+ return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
}
options := storage.ContainerOptions{
@@ -350,6 +430,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
},
LabelOpts: c.config.LabelOpts,
}
+ if c.restoreFromCheckpoint {
+ // If restoring from a checkpoint, the root file-system
+ // needs to be mounted with the same SELinux labels as
+ // it was mounted previously.
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ options.Flags["ProcessLabel"] = c.config.ProcessLabel
+ options.Flags["MountLabel"] = c.config.MountLabel
+ }
if c.config.Privileged {
privOpt := func(opt string) bool {
for _, privopt := range []string{"nodev", "nosuid", "noexec"} {
@@ -359,7 +449,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
return false
}
- defOptions, err := storage.GetDefaultMountOptions()
+
+ defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
return errors.Wrapf(err, "error getting default mount options")
}
@@ -415,8 +506,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
@@ -449,10 +540,11 @@ func (c *Container) teardownStorage() error {
// It does not save the results - assumes the database will do that for us
func resetState(state *ContainerState) error {
state.PID = 0
+ state.ConmonPID = 0
state.Mountpoint = ""
state.Mounted = false
- if state.State != ContainerStateExited {
- state.State = ContainerStateConfigured
+ if state.State != define.ContainerStateExited {
+ state.State = define.ContainerStateConfigured
}
state.ExecSessions = make(map[string]*ExecSession)
state.NetworkStatus = nil
@@ -476,7 +568,7 @@ func (c *Container) refresh() error {
}
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
}
// We need to get the container's temporary directory from c/storage
@@ -507,7 +599,7 @@ func (c *Container) refresh() error {
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
+ return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID())
}
c.lock = lock
@@ -545,13 +637,13 @@ func (c *Container) removeConmonFiles() error {
// Instead of outright deleting the exit file, rename it (if it exists).
// We want to retain it so we can get the exit code of containers which
// are removed (at least until we have a workable events system)
- exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
- oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
+ exitFile := filepath.Join(c.ociRuntime.exitsDir, c.ID())
+ oldExitFile := filepath.Join(c.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
if _, err := os.Stat(exitFile); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
}
- } else if err == nil {
+ } else {
// Rename should replace the old exit file (if it exists)
if err := os.Rename(exitFile, oldExitFile); err != nil {
return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
@@ -564,11 +656,11 @@ func (c *Container) removeConmonFiles() error {
func (c *Container) export(path string) error {
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
- mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+ containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
return errors.Wrapf(err, "error mounting container %q", c.ID())
}
- mountPoint = mount
+ mountPoint = containerMount
defer func() {
if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
@@ -606,7 +698,7 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
- return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil
+ return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
}
// save container state to the database
@@ -622,11 +714,11 @@ func (c *Container) save() error {
// Otherwise, this function will return with error if there are dependencies of this container that aren't running.
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
// Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateCreated ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
}
if !recursive {
@@ -651,13 +743,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Or initialize it if necessary
if err := c.init(ctx, false); err != nil {
return err
@@ -674,7 +766,7 @@ func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
}
return nil
@@ -712,7 +804,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
if len(graph.nodes) == 0 {
return nil
}
- return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
}
ctrErrors := make(map[string]error)
@@ -728,7 +820,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
- return errors.Wrapf(ErrInternal, "error starting some containers")
+ return errors.Wrapf(define.ErrInternal, "error starting some containers")
}
return nil
}
@@ -760,7 +852,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
}
// if the dependency is already running, we can assume its dependencies are also running
// so no need to add them to those we need to start
- if status != ContainerStateRunning {
+ if status != define.ContainerStateRunning {
visited[depID] = dep
if err := dep.getAllDependencies(visited); err != nil {
return err
@@ -792,7 +884,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
- if state != ContainerStateRunning {
+ if state != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@@ -801,34 +893,6 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
return notRunning, nil
}
-// Check if a container's dependencies are running
-// Returns a []string containing the IDs of dependencies that are not running
-// Assumes depencies are already locked, and will be passed in
-// Accepts a map[string]*Container containing, at a minimum, the locked
-// dependency containers
-// (This must be a map from container ID to container)
-func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container) ([]string, error) {
- deps := c.Dependencies()
- notRunning := []string{}
-
- for _, dep := range deps {
- depCtr, ok := depCtrs[dep]
- if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep)
- }
-
- if err := c.syncContainer(); err != nil {
- return nil, err
- }
-
- if depCtr.state.State != ContainerStateRunning {
- notRunning = append(notRunning, dep)
- }
- }
-
- return notRunning, nil
-}
-
func (c *Container) completeNetworkSetup() error {
netDisabled, err := c.NetworkDisabled()
if err != nil {
@@ -852,19 +916,19 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
span.SetTag("struct", "container")
defer span.Finish()
- // Generate the OCI spec
- spec, err := c.generateSpec(ctx)
+ // Generate the OCI runtime spec
+ newSpec, err := c.generateSpec(ctx)
if err != nil {
return err
}
- // Save the OCI spec to disk
- if err := c.saveSpec(spec); err != nil {
+ // Save the OCI runtime spec to disk
+ if err := c.saveSpec(newSpec); err != nil {
return err
}
// With the spec complete, do an OCI create
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
+ if err := c.ociRuntime.createContainer(c, nil); err != nil {
return err
}
@@ -872,7 +936,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
c.state.ExitCode = 0
c.state.Exited = false
- c.state.State = ContainerStateCreated
+ c.state.State = define.ContainerStateCreated
c.state.StoppedByUser = false
c.state.RestartPolicyMatch = false
@@ -903,7 +967,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If the container is not ContainerStateStopped or
// ContainerStateCreated, do nothing.
- if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated {
+ if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
return nil
}
@@ -919,10 +983,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If we were Stopped, we are now Exited, as we've removed ourself
// from the runtime.
// If we were Created, we are now Configured.
- if c.state.State == ContainerStateStopped {
- c.state.State = ContainerStateExited
- } else if c.state.State == ContainerStateCreated {
- c.state.State = ContainerStateConfigured
+ if c.state.State == define.ContainerStateStopped {
+ c.state.State = define.ContainerStateExited
+ } else if c.state.State == define.ContainerStateCreated {
+ c.state.State = define.ContainerStateConfigured
}
if c.valid {
@@ -961,17 +1025,17 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
// Does not lock or check validity
func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateUnknown, throw an error
- if c.state.State == ContainerStateUnknown {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+ if c.state.State == define.ContainerStateUnknown {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
}
// If we are running, do nothing
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
return nil
}
// If we are paused, throw an error
- if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
+ if c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
}
defer func() {
@@ -988,14 +1052,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateStopped we need to remove from runtime
// And reset to ContainerStateConfigured
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
if err := c.init(ctx, false); err != nil {
return err
}
@@ -1011,12 +1075,12 @@ func (c *Container) start() error {
logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args)
}
- if err := c.runtime.ociRuntime.startContainer(c); err != nil {
+ if err := c.ociRuntime.startContainer(c); err != nil {
return err
}
logrus.Debugf("Started container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
@@ -1036,10 +1100,12 @@ func (c *Container) start() error {
func (c *Container) stop(timeout uint) error {
logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout)
- if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil {
+ if err := c.ociRuntime.stopContainer(c, timeout); err != nil {
return err
}
+ c.state.PID = 0
+ c.state.ConmonPID = 0
c.state.StoppedByUser = true
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
@@ -1051,39 +1117,39 @@ func (c *Container) stop(timeout uint) error {
// Internal, non-locking function to pause a container
func (c *Container) pause() error {
- if err := c.runtime.ociRuntime.pauseContainer(c); err != nil {
+ if err := c.ociRuntime.pauseContainer(c); err != nil {
return err
}
logrus.Debugf("Paused container %s", c.ID())
- c.state.State = ContainerStatePaused
+ c.state.State = define.ContainerStatePaused
return c.save()
}
// Internal, non-locking function to unpause a container
func (c *Container) unpause() error {
- if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil {
+ if err := c.ociRuntime.unpauseContainer(c); err != nil {
return err
}
logrus.Debugf("Unpaused container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
return c.save()
}
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
- if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
+ if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
}
c.newContainerEvent(events.Restart)
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(timeout); err != nil {
return err
}
@@ -1099,13 +1165,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, false); err != nil {
return err
@@ -1161,8 +1227,8 @@ func (c *Container) cleanupStorage() error {
return nil
}
- for _, mount := range c.config.Mounts {
- if err := c.unmountSHM(mount); err != nil {
+ for _, containerMount := range c.config.Mounts {
+ if err := c.unmountSHM(containerMount); err != nil {
return err
}
}
@@ -1243,7 +1309,7 @@ func (c *Container) delete(ctx context.Context) (err error) {
span.SetTag("struct", "container")
defer span.Finish()
- if err := c.runtime.ociRuntime.deleteContainer(c); err != nil {
+ if err := c.ociRuntime.deleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -1276,6 +1342,7 @@ func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
return err
}
for i, hook := range extensionHooks {
+ hook := hook
logrus.Debugf("container %s: invoke poststop hook %d, path %s", c.ID(), i, hook.Path)
var stderr, stdout bytes.Buffer
hookErr, err := exec.Run(ctx, &hook, state, &stdout, &stderr, exec.DefaultPostKillTimeout)
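
The hook := hook line added above is the standard guard against Go's range-variable aliasing: exec.Run receives &hook, and (before Go 1.22 changed loop scoping) the loop variable is a single variable reused across iterations, so without the per-iteration copy every call could observe the last hook. A small self-contained demonstration of the pitfall:

    package main

    import "fmt"

    func main() {
    	stages := []string{"prestart", "poststart", "poststop"}
    	var bad, good []*string
    	for _, s := range stages {
    		bad = append(bad, &s) // all three pointers alias the one loop variable
    		s := s                // per-iteration copy, as in postDeleteHooks
    		good = append(good, &s)
    	}
    	fmt.Println(*bad[0], *bad[1], *bad[2])    // poststop poststop poststop (pre-Go 1.22)
    	fmt.Println(*good[0], *good[1], *good[2]) // prestart poststart poststop
    }
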
@@ -1345,7 +1412,7 @@ func (c *Container) appendStringToRundir(destFile, output string) (string, error
return filepath.Join(c.state.RunDir, destFile), nil
}
-// Save OCI spec to disk, replacing any existing specs for the container
+// saveSpec saves the OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
// Cannot guarantee some things, e.g. network namespaces, have the same
@@ -1393,14 +1460,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
return nil, err
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
- if len(hooks) > 0 || config.Hooks != nil {
- logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ if len(ociHooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
}
- for i, hook := range hooks {
+ for i, hook := range ociHooks {
allHooks[i] = hook
}
}
@@ -1454,13 +1521,6 @@ func (c *Container) unmount(force bool) error {
return nil
}
-// getExcludedCGroups returns a string slice of cgroups we want to exclude
-// because runc or other components are unaware of them.
-func getExcludedCGroups() (excludes []string) {
- excludes = []string{"rdma"}
- return
-}
-
// this should be from chrootarchive.
func (c *Container) copyWithTarFromImage(src, dest string) error {
mountpoint, err := c.mount()
@@ -1482,18 +1542,86 @@ func (c *Container) copyWithTarFromImage(src, dest string) error {
// If it is, we'll remove the container anyways.
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
- if c.state.State == ContainerStateUnknown {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
+ if c.state.State == define.ContainerStateUnknown {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
}
- if c.state.State == ContainerStateRunning ||
- c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
+ if c.state.State == define.ContainerStateRunning ||
+ c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
}
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
}
return nil
}
+
+// writeJSONFile marshals and writes the given data to a JSON file
+// in the bundle path
+func (c *Container) writeJSONFile(v interface{}, file string) (err error) {
+ fileJSON, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
+ }
+ file = filepath.Join(c.bundlePath(), file)
+ if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// prepareCheckpointExport writes the config and spec to
+// JSON files for later export
+func (c *Container) prepareCheckpointExport() (err error) {
+ // save live config
+ if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ return err
+ }
+
+ // save spec
+ jsonPath := filepath.Join(c.bundlePath(), "config.json")
+ g, err := generate.NewFromFile(jsonPath)
+ if err != nil {
+ logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
+ return err
+ }
+ if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
+ return err
+ }
+
+ return nil
+}
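
On the restore side, these dump files are read back in. A minimal hedged sketch of a reader for config.dump, using a hypothetical helper rather than the actual libpod restore path, and assuming the file's encoding/json, io/ioutil, path/filepath, and errors imports:

    // readDumpedConfig loads the container configuration that
    // prepareCheckpointExport wrote out as config.dump.
    func readDumpedConfig(bundlePath string) (*ContainerConfig, error) {
    	data, err := ioutil.ReadFile(filepath.Join(bundlePath, "config.dump"))
    	if err != nil {
    		return nil, errors.Wrapf(err, "error reading config.dump in %s", bundlePath)
    	}
    	config := new(ContainerConfig)
    	if err := json.Unmarshal(data, config); err != nil {
    		return nil, errors.Wrapf(err, "error unmarshalling config.dump in %s", bundlePath)
    	}
    	return config, nil
    }
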
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+ namedUserVolumes := []*ContainerNamedVolume{}
+ userMounts := []spec.Mount{}
+
+ // We need to parse all named volumes and mounts into maps, so we don't
+ // end up with repeated lookups for each user volume.
+ // Map destination to struct, as destination is what is stored in
+ // UserVolumes.
+ namedVolumes := make(map[string]*ContainerNamedVolume)
+ mounts := make(map[string]spec.Mount)
+ for _, namedVol := range c.config.NamedVolumes {
+ namedVolumes[namedVol.Dest] = namedVol
+ }
+ for _, mount := range ctrSpec.Mounts {
+ mounts[mount.Destination] = mount
+ }
+
+ for _, vol := range c.config.UserVolumes {
+ if volume, ok := namedVolumes[vol]; ok {
+ namedUserVolumes = append(namedUserVolumes, volume)
+ } else if mount, ok := mounts[vol]; ok {
+ userMounts = append(userMounts, mount)
+ } else {
+ logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+ }
+ }
+ return namedUserVolumes, userMounts
+}
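
The two maps keep each user-volume lookup constant-time instead of rescanning both slices per entry. A hedged usage sketch, assuming the generated runtime spec is available as c.config.Spec:

    namedVols, bindMounts := c.sortUserVolumes(c.config.Spec)
    for _, v := range namedVols {
    	logrus.Debugf("user named volume %s mounted at %s", v.Name, v.Dest)
    }
    for _, m := range bindMounts {
    	logrus.Debugf("user mount from %s at %s", m.Source, m.Destination)
    }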