Diffstat (limited to 'libpod/container_internal.go')
-rw-r--r-- | libpod/container_internal.go | 206
1 file changed, 117 insertions, 89 deletions
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 5d824908c..c409da96a 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -12,6 +12,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
 	"github.com/containers/libpod/pkg/ctime"
 	"github.com/containers/libpod/pkg/hooks"
@@ -23,7 +24,7 @@ import (
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -155,7 +156,7 @@ func (c *Container) waitForExitFileAndSync() error {
 		// Reset our state
 		c.state.ExitCode = -1
 		c.state.FinishedTime = time.Now()
-		c.state.State = ContainerStateStopped
+		c.state.State = define.ContainerStateStopped
 
 		if err2 := c.save(); err2 != nil {
 			logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
@@ -240,10 +241,10 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
 
 	// Is the container running again?
 	// If so, we don't have to do anything
-	if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+	if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
 		return false, nil
-	} else if c.state.State == ContainerStateUnknown {
-		return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
+	} else if c.state.State == define.ContainerStateUnknown {
+		return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
 	}
 
 	c.newContainerEvent(events.Restart)
@@ -266,13 +267,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
 		return false, err
 	}
 
-	if c.state.State == ContainerStateStopped {
+	if c.state.State == define.ContainerStateStopped {
 		// Reinitialize the container if we need to
 		if err := c.reinit(ctx, true); err != nil {
 			return false, err
 		}
-	} else if c.state.State == ContainerStateConfigured ||
-		c.state.State == ContainerStateExited {
+	} else if c.state.State == define.ContainerStateConfigured ||
+		c.state.State == define.ContainerStateExited {
 		// Initialize the container
 		if err := c.init(ctx, true); err != nil {
 			return false, err
@@ -294,9 +295,9 @@ func (c *Container) syncContainer() error {
 	}
 	// If runtime knows about the container, update its status in runtime
 	// And then save back to disk
-	if (c.state.State != ContainerStateUnknown) &&
-		(c.state.State != ContainerStateConfigured) &&
-		(c.state.State != ContainerStateExited) {
+	if (c.state.State != define.ContainerStateUnknown) &&
+		(c.state.State != define.ContainerStateConfigured) &&
+		(c.state.State != define.ContainerStateExited) {
 		oldState := c.state.State
 		// TODO: optionally replace this with a stat for the exit file
 		if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
@@ -306,8 +307,8 @@ func (c *Container) syncContainer() error {
 		if c.state.State != oldState {
 			// Check for a restart policy match
 			if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo &&
-				(oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
-				(c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
+				(oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) &&
+				(c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) &&
 				!c.state.StoppedByUser {
 				c.state.RestartPolicyMatch = true
 			}
@@ -319,7 +320,7 @@ func (c *Container) syncContainer() error {
 	}
 
 	if !c.valid {
-		return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+		return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
 	}
 
 	return nil
@@ -332,16 +333,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
 	defer span.Finish()
 
 	if !c.valid {
-		return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+		return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
 	}
 
-	if c.state.State != ContainerStateConfigured {
-		return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+	if c.state.State != define.ContainerStateConfigured {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
 	}
 
 	// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
 	if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
-		return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image")
+		return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
 	}
 
 	options := storage.ContainerOptions{
@@ -427,8 +428,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
 
 // Tear down a container's storage prior to removal
 func (c *Container) teardownStorage() error {
-	if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
-		return errors.Wrapf(ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+	if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
 	}
 
 	artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
@@ -461,10 +462,11 @@ func (c *Container) teardownStorage() error {
 // It does not save the results - assumes the database will do that for us
 func resetState(state *ContainerState) error {
 	state.PID = 0
+	state.ConmonPID = 0
 	state.Mountpoint = ""
 	state.Mounted = false
-	if state.State != ContainerStateExited {
-		state.State = ContainerStateConfigured
+	if state.State != define.ContainerStateExited {
+		state.State = define.ContainerStateConfigured
 	}
 	state.ExecSessions = make(map[string]*ExecSession)
 	state.NetworkStatus = nil
@@ -488,7 +490,7 @@ func (c *Container) refresh() error {
 	}
 
 	if !c.valid {
-		return errors.Wrapf(ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
+		return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
 	}
 
 	// We need to get the container's temporary directory from c/storage
@@ -563,7 +565,7 @@ func (c *Container) removeConmonFiles() error {
 		if !os.IsNotExist(err) {
 			return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
 		}
-	} else if err == nil {
+	} else {
 		// Rename should replace the old exit file (if it exists)
 		if err := os.Rename(exitFile, oldExitFile); err != nil {
 			return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
@@ -576,11 +578,11 @@ func (c *Container) export(path string) error {
 	mountPoint := c.state.Mountpoint
 	if !c.state.Mounted {
-		mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+		containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
 		if err != nil {
 			return errors.Wrapf(err, "error mounting container %q", c.ID())
 		}
-		mountPoint = mount
+		mountPoint = containerMount
 		defer func() {
 			if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
 				logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
@@ -618,7 +620,7 @@ func (c *Container) isStopped() (bool, error) {
 	if err != nil {
 		return true, err
 	}
-	return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil
+	return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
 }
 
 // save container state to the database
@@ -634,11 +636,11 @@
 // Otherwise, this function will return with error if there are dependencies of this container that aren't running.
 func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
 	// Container must be created or stopped to be started
-	if !(c.state.State == ContainerStateConfigured ||
-		c.state.State == ContainerStateCreated ||
-		c.state.State == ContainerStateStopped ||
-		c.state.State == ContainerStateExited) {
-		return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+	if !(c.state.State == define.ContainerStateConfigured ||
+		c.state.State == define.ContainerStateCreated ||
+		c.state.State == define.ContainerStateStopped ||
+		c.state.State == define.ContainerStateExited) {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
 	}
 
 	if !recursive {
@@ -663,13 +665,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
 		return err
 	}
 
-	if c.state.State == ContainerStateStopped {
+	if c.state.State == define.ContainerStateStopped {
 		// Reinitialize the container if we need to
 		if err := c.reinit(ctx, false); err != nil {
 			return err
 		}
-	} else if c.state.State == ContainerStateConfigured ||
-		c.state.State == ContainerStateExited {
+	} else if c.state.State == define.ContainerStateConfigured ||
+		c.state.State == define.ContainerStateExited {
 		// Or initialize it if necessary
 		if err := c.init(ctx, false); err != nil {
 			return err
@@ -686,7 +688,7 @@ func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
 	}
 	if len(notRunning) > 0 {
 		depString := strings.Join(notRunning, ",")
-		return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+		return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
 	}
 
 	return nil
@@ -724,7 +726,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
 		if len(graph.nodes) == 0 {
 			return nil
 		}
-		return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+		return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
 	}
 
 	ctrErrors := make(map[string]error)
@@ -740,7 +742,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
 		for _, e := range ctrErrors {
 			logrus.Errorf("%q", e)
 		}
-		return errors.Wrapf(ErrInternal, "error starting some containers")
+		return errors.Wrapf(define.ErrInternal, "error starting some containers")
 	}
 	return nil
 }
@@ -772,7 +774,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
 		}
 		// if the dependency is already running, we can assume its dependencies are also running
 		// so no need to add them to those we need to start
-		if status != ContainerStateRunning {
+		if status != define.ContainerStateRunning {
 			visited[depID] = dep
 			if err := dep.getAllDependencies(visited); err != nil {
 				return err
@@ -804,7 +806,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
 		if err != nil {
 			return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
 		}
-		if state != ContainerStateRunning {
+		if state != define.ContainerStateRunning {
 			notRunning = append(notRunning, dep)
 		}
 		depCtrs[dep] = depCtr
@@ -826,14 +828,14 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container
 	for _, dep := range deps {
 		depCtr, ok := depCtrs[dep]
 		if !ok {
-			return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep)
+			return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep)
 		}
 
 		if err := c.syncContainer(); err != nil {
 			return nil, err
 		}
 
-		if depCtr.state.State != ContainerStateRunning {
+		if depCtr.state.State != define.ContainerStateRunning {
 			notRunning = append(notRunning, dep)
 		}
 	}
@@ -864,18 +866,18 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 	span.SetTag("struct", "container")
 	defer span.Finish()
 
-	// Generate the OCI spec
-	spec, err := c.generateSpec(ctx)
+	// Generate the OCI newSpec
+	newSpec, err := c.generateSpec(ctx)
 	if err != nil {
 		return err
 	}
 
-	// Save the OCI spec to disk
-	if err := c.saveSpec(spec); err != nil {
+	// Save the OCI newSpec to disk
+	if err := c.saveSpec(newSpec); err != nil {
 		return err
 	}
 
-	// With the spec complete, do an OCI create
+	// With the newSpec complete, do an OCI create
 	if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
 		return err
 	}
@@ -884,7 +886,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 
 	c.state.ExitCode = 0
 	c.state.Exited = false
-	c.state.State = ContainerStateCreated
+	c.state.State = define.ContainerStateCreated
 	c.state.StoppedByUser = false
 	c.state.RestartPolicyMatch = false
 
@@ -915,7 +917,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
 
 	// If the container is not ContainerStateStopped or
 	// ContainerStateCreated, do nothing.
-	if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated {
+	if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
 		return nil
 	}
 
@@ -931,10 +933,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
 	// If we were Stopped, we are now Exited, as we've removed ourself
 	// from the runtime.
 	// If we were Created, we are now Configured.
-	if c.state.State == ContainerStateStopped {
-		c.state.State = ContainerStateExited
-	} else if c.state.State == ContainerStateCreated {
-		c.state.State = ContainerStateConfigured
+	if c.state.State == define.ContainerStateStopped {
+		c.state.State = define.ContainerStateExited
+	} else if c.state.State == define.ContainerStateCreated {
+		c.state.State = define.ContainerStateConfigured
 	}
 
 	if c.valid {
@@ -973,17 +975,17 @@
 // Does not lock or check validity
 func (c *Container) initAndStart(ctx context.Context) (err error) {
 	// If we are ContainerStateUnknown, throw an error
-	if c.state.State == ContainerStateUnknown {
-		return errors.Wrapf(ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+	if c.state.State == define.ContainerStateUnknown {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
 	}
 
 	// If we are running, do nothing
-	if c.state.State == ContainerStateRunning {
+	if c.state.State == define.ContainerStateRunning {
 		return nil
 	}
 
 	// If we are paused, throw an error
-	if c.state.State == ContainerStatePaused {
-		return errors.Wrapf(ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
+	if c.state.State == define.ContainerStatePaused {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
 	}
 
 	defer func() {
@@ -1000,14 +1002,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
 
 	// If we are ContainerStateStopped we need to remove from runtime
 	// And reset to ContainerStateConfigured
-	if c.state.State == ContainerStateStopped {
+	if c.state.State == define.ContainerStateStopped {
 		logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
 
 		if err := c.reinit(ctx, false); err != nil {
 			return err
 		}
-	} else if c.state.State == ContainerStateConfigured ||
-		c.state.State == ContainerStateExited {
+	} else if c.state.State == define.ContainerStateConfigured ||
+		c.state.State == define.ContainerStateExited {
 		if err := c.init(ctx, false); err != nil {
 			return err
 		}
@@ -1028,7 +1030,7 @@ func (c *Container) start() error {
 	}
 	logrus.Debugf("Started container %s", c.ID())
 
-	c.state.State = ContainerStateRunning
+	c.state.State = define.ContainerStateRunning
 
 	if c.config.HealthCheckConfig != nil {
 		if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
@@ -1052,6 +1054,8 @@ func (c *Container) stop(timeout uint) error {
 		return err
 	}
 
+	c.state.PID = 0
+	c.state.ConmonPID = 0
 	c.state.StoppedByUser = true
 	if err := c.save(); err != nil {
 		return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
@@ -1069,7 +1073,7 @@ func (c *Container) pause() error {
 
 	logrus.Debugf("Paused container %s", c.ID())
 
-	c.state.State = ContainerStatePaused
+	c.state.State = define.ContainerStatePaused
 
 	return c.save()
 }
@@ -1082,20 +1086,20 @@ func (c *Container) unpause() error {
 
 	logrus.Debugf("Unpaused container %s", c.ID())
 
-	c.state.State = ContainerStateRunning
+	c.state.State = define.ContainerStateRunning
 
 	return c.save()
 }
 
 // Internal, non-locking function to restart a container
 func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
-	if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused {
-		return errors.Wrapf(ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
+	if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
 	}
 
 	c.newContainerEvent(events.Restart)
 
-	if c.state.State == ContainerStateRunning {
+	if c.state.State == define.ContainerStateRunning {
 		if err := c.stop(timeout); err != nil {
 			return err
 		}
@@ -1111,13 +1115,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
 		return err
 	}
 
-	if c.state.State == ContainerStateStopped {
+	if c.state.State == define.ContainerStateStopped {
 		// Reinitialize the container if we need to
 		if err := c.reinit(ctx, false); err != nil {
 			return err
 		}
-	} else if c.state.State == ContainerStateConfigured ||
-		c.state.State == ContainerStateExited {
+	} else if c.state.State == define.ContainerStateConfigured ||
+		c.state.State == define.ContainerStateExited {
 		// Initialize the container
 		if err := c.init(ctx, false); err != nil {
 			return err
@@ -1173,8 +1177,8 @@ func (c *Container) cleanupStorage() error {
 		return nil
 	}
 
-	for _, mount := range c.config.Mounts {
-		if err := c.unmountSHM(mount); err != nil {
+	for _, containerMount := range c.config.Mounts {
+		if err := c.unmountSHM(containerMount); err != nil {
 			return err
 		}
 	}
@@ -1405,14 +1409,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
 			}
 			return nil, err
 		}
-		hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+		ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
 		if err != nil {
 			return nil, err
 		}
-		if len(hooks) > 0 || config.Hooks != nil {
-			logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+		if len(ociHooks) > 0 || config.Hooks != nil {
+			logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
 		}
-		for i, hook := range hooks {
+		for i, hook := range ociHooks {
 			allHooks[i] = hook
 		}
 	}
@@ -1470,13 +1474,6 @@ func (c *Container) unmount(force bool) error {
 	return nil
 }
 
-// getExcludedCGroups returns a string slice of cgroups we want to exclude
-// because runc or other components are unaware of them.
-func getExcludedCGroups() (excludes []string) {
-	excludes = []string{"rdma"}
-	return
-}
-
 // this should be from chrootarchive.
 func (c *Container) copyWithTarFromImage(src, dest string) error {
 	mountpoint, err := c.mount()
@@ -1498,17 +1495,17 @@
 // If it is, we'll remove the container anyways.
 // Returns nil if safe to remove, or an error describing why it's unsafe if not.
 func (c *Container) checkReadyForRemoval() error {
-	if c.state.State == ContainerStateUnknown {
-		return errors.Wrapf(ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
+	if c.state.State == define.ContainerStateUnknown {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
 	}
 
-	if c.state.State == ContainerStateRunning ||
-		c.state.State == ContainerStatePaused {
-		return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
+	if c.state.State == define.ContainerStateRunning ||
+		c.state.State == define.ContainerStatePaused {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
 	}
 
 	if len(c.state.ExecSessions) != 0 {
-		return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
+		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
 	}
 
 	return nil
@@ -1550,3 +1547,34 @@ func (c *Container) prepareCheckpointExport() (err error) {
 
 	return nil
 }
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+	namedUserVolumes := []*ContainerNamedVolume{}
+	userMounts := []spec.Mount{}
+
+	// We need to parse all named volumes and mounts into maps, so we don't
+	// end up with repeated lookups for each user volume.
+	// Map destination to struct, as destination is what is stored in
+	// UserVolumes.
+	namedVolumes := make(map[string]*ContainerNamedVolume)
+	mounts := make(map[string]spec.Mount)
+	for _, namedVol := range c.config.NamedVolumes {
+		namedVolumes[namedVol.Dest] = namedVol
+	}
+	for _, mount := range ctrSpec.Mounts {
+		mounts[mount.Destination] = mount
+	}
+
+	for _, vol := range c.config.UserVolumes {
+		if volume, ok := namedVolumes[vol]; ok {
+			namedUserVolumes = append(namedUserVolumes, volume)
+		} else if mount, ok := mounts[vol]; ok {
+			userMounts = append(userMounts, mount)
+		} else {
+			logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+		}
+	}
+	return namedUserVolumes, userMounts
+}
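For readers skimming the final hunk: the new sortUserVolumes helper resolves each entry of c.config.UserVolumes against two destination-keyed maps so the split into named volumes and plain mounts costs one pass instead of a nested scan. The following is a minimal, self-contained sketch of that lookup pattern only — namedVolume, mountEntry, and the sample destinations are hypothetical stand-ins for libpod's ContainerNamedVolume and the runtime-spec Mount type, not code from this commit.

package main

import "fmt"

// namedVolume and mountEntry are simplified stand-ins for libpod's
// ContainerNamedVolume and the OCI runtime-spec Mount type (assumed shapes).
type namedVolume struct{ Name, Dest string }
type mountEntry struct{ Source, Destination string }

// sortUserVolumes splits user-requested destinations into named volumes and
// plain mounts, mirroring the destination-keyed map lookup in the hunk above.
func sortUserVolumes(userVolumes []string, named []namedVolume, mounts []mountEntry) ([]namedVolume, []mountEntry) {
	// Build the maps once, keyed by destination, to avoid repeated scans.
	namedByDest := make(map[string]namedVolume)
	mountByDest := make(map[string]mountEntry)
	for _, v := range named {
		namedByDest[v.Dest] = v
	}
	for _, m := range mounts {
		mountByDest[m.Destination] = m
	}

	var outNamed []namedVolume
	var outMounts []mountEntry
	for _, dest := range userVolumes {
		if v, ok := namedByDest[dest]; ok {
			outNamed = append(outNamed, v)
		} else if m, ok := mountByDest[dest]; ok {
			outMounts = append(outMounts, m)
		} else {
			// libpod logs a warning here rather than failing the container.
			fmt.Printf("no mount found at destination %q\n", dest)
		}
	}
	return outNamed, outMounts
}

func main() {
	named := []namedVolume{{Name: "dbdata", Dest: "/var/lib/db"}}
	mounts := []mountEntry{{Source: "/tmp/cfg", Destination: "/etc/cfg"}}
	nv, ms := sortUserVolumes([]string{"/var/lib/db", "/etc/cfg", "/missing"}, named, mounts)
	fmt.Println(len(nv), len(ms)) // prints 1 1, after a warning for /missing
}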