Diffstat (limited to 'libpod/container_internal.go')
-rw-r--r--  libpod/container_internal.go | 337
 1 file changed, 223 insertions(+), 114 deletions(-)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b0dcc853e..e3753d825 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -10,22 +10,18 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
- "github.com/containers/buildah/imagebuildah"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/mount"
- "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/text/language"
@@ -252,6 +248,10 @@ func (c *Container) syncContainer() error {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "setupStorage")
+ span.SetTag("type", "container")
+ defer span.Finish()
+
if !c.valid {
return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
}
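
The same tracing preamble recurs in every function this diff touches. Below is a minimal, self-contained sketch of the pattern, assuming only the standard opentracing-go API (with no tracer registered, the global tracer is a no-op, so it runs as-is). Note that the diff discards the derived context with span, _ :=, so spans opened further down the call chain attach to the caller's context rather than to the new span.

package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// traced shows the span pattern used throughout this diff:
// StartSpanFromContext derives a child span from any span already
// carried by ctx and returns a new context carrying the child.
func traced(ctx context.Context) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "setupStorage")
	span.SetTag("struct", "container")
	defer span.Finish()

	// Passing the derived ctx onward would make nested spans children
	// of this one; the diff instead discards it with `span, _ := ...`.
	_ = ctx
}

func main() {
	traced(context.Background())
}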
@@ -489,9 +489,20 @@ func (c *Container) removeConmonFiles() error {
return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
}
+ // Instead of outright deleting the exit file, rename it (if it exists).
+ // We want to retain it so we can get the exit code of containers which
+ // are removed (at least until we have a workable events system)
exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
- if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s exit file", c.ID())
+ oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
+ if _, err := os.Stat(exitFile); err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+ }
+ } else {
+ // Rename should replace the old exit file (if it exists)
+ if err := os.Rename(exitFile, oldExitFile); err != nil {
+ return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
+ }
}
return nil
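
For context on what the rename buys: conmon writes the container's exit code to a file under exitsDir, so keeping a renamed copy lets the exit code survive container removal. A hypothetical sketch of a reader for the retained file follows; the helper name, the exits directory path, and the bare-integer file format are assumptions, not libpod API.

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

// readRetainedExitCode is a hypothetical helper: it parses the exit
// code out of the renamed "<id>-old" file left behind by
// removeConmonFiles. The file is assumed to contain a bare integer.
func readRetainedExitCode(exitsDir, ctrID string) (int, error) {
	oldExitFile := filepath.Join(exitsDir, fmt.Sprintf("%s-old", ctrID))
	data, err := ioutil.ReadFile(oldExitFile)
	if err != nil {
		return -1, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	if code, err := readRetainedExitCode("/run/libpod/exits", "abc123"); err == nil {
		fmt.Println("exit code:", code)
	}
}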
@@ -553,6 +564,160 @@ func (c *Container) save() error {
return nil
}
+// Checks that the container is in the right state, then initializes it in preparation for starting.
+// If recursive is true, each of the container's dependencies will be started as well.
+// Otherwise, this function returns an error if any of this container's dependencies are not running.
+func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
+ // Container must be created or stopped to be started
+ if !(c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateCreated ||
+ c.state.State == ContainerStateStopped ||
+ c.state.State == ContainerStateExited) {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in the Configured, Created, Stopped, or Exited state to be started", c.ID())
+ }
+
+ if !recursive {
+ if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+ } else {
+ if err := c.startDependencies(ctx); err != nil {
+ return err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
+ }
+ }()
+
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
+ if c.state.State == ContainerStateStopped {
+ // Reinitialize the container if we need to
+ if err := c.reinit(ctx); err != nil {
+ return err
+ }
+ } else if c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateExited {
+ // Or initialize it if necessary
+ if err := c.init(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
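
The branching above compresses into a small decision table: stopped containers are reinitialized, configured or exited ones are initialized from scratch, and created ones can proceed straight to start. A self-contained toy version (the state enum here is a stand-in, not libpod's real ContainerState type):

package main

import "fmt"

// Toy stand-ins for libpod's ContainerState constants.
type ctrState int

const (
	stateConfigured ctrState = iota
	stateCreated
	stateRunning
	stateStopped
	stateExited
)

// nextStep mirrors the branching in prepareToStart: stopped containers
// are reinitialized, configured/exited ones initialized from scratch,
// created ones can start directly, and anything else is an error.
func nextStep(s ctrState) (string, error) {
	switch s {
	case stateStopped:
		return "reinit", nil
	case stateConfigured, stateExited:
		return "init", nil
	case stateCreated:
		return "start", nil
	default:
		return "", fmt.Errorf("invalid state for start: %d", s)
	}
}

func main() {
	for _, s := range []ctrState{stateStopped, stateConfigured, stateRunning} {
		step, err := nextStep(s)
		fmt.Println(step, err)
	}
}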
+
+// Checks that all dependencies are running and, if any are not, returns an error naming them.
+func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
+ notRunning, err := c.checkDependenciesRunning()
+ if err != nil {
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
+ }
+ if len(notRunning) > 0 {
+ depString := strings.Join(notRunning, ",")
+ return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ }
+
+ return nil
+}
+
+// Recursively start all dependencies of a container so the container can be started.
+func (c *Container) startDependencies(ctx context.Context) error {
+ depCtrIDs := c.Dependencies()
+ if len(depCtrIDs) == 0 {
+ return nil
+ }
+
+ depVisitedCtrs := make(map[string]*Container)
+ if err := c.getAllDependencies(depVisitedCtrs); err != nil {
+ return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
+ }
+
+ // Because of how Go handles passing slices through functions, a slice cannot grow between function calls
+ // without clunky syntax. Work around this by translating the map into a slice for buildContainerGraph
+ depCtrs := make([]*Container, 0)
+ for _, ctr := range depVisitedCtrs {
+ depCtrs = append(depCtrs, ctr)
+ }
+
+ // Build a dependency graph of containers
+ graph, err := buildContainerGraph(depCtrs)
+ if err != nil {
+ return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
+ }
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ // we have no dependencies that need starting, go ahead and return
+ if len(graph.nodes) == 0 {
+ return nil
+ }
+ return errors.Wrapf(ErrNoSuchCtr, "all dependencies of container %s have unresolved dependencies, likely a dependency cycle", c.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
+ }
+
+ if len(ctrErrors) > 0 {
+ logrus.Errorf("error starting some container dependencies")
+ for _, e := range ctrErrors {
+ logrus.Errorf("%q", e)
+ }
+ return errors.Wrapf(ErrInternal, "error starting some containers")
+ }
+ return nil
+}
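
The noDepNodes check above rests on a graph fact worth spelling out: in a finite, nonempty dependency graph where every node has at least one dependency, some dependency chain must eventually revisit a node, i.e. there is a cycle, so no valid start order exists. A toy version of the check:

package main

import "fmt"

// hasNoDepNode reports whether some node in the graph has zero
// dependencies. If none does and the graph is nonempty, every chain of
// dependencies must eventually revisit a node, so the graph contains a
// cycle; startDependencies treats that as fatal.
func hasNoDepNode(deps map[string][]string) bool {
	for _, d := range deps {
		if len(d) == 0 {
			return true
		}
	}
	return false
}

func main() {
	cyclic := map[string][]string{"a": {"b"}, "b": {"a"}}
	acyclic := map[string][]string{"a": {"b"}, "b": {}}
	fmt.Println(hasNoDepNode(cyclic))  // false: every node depends on another
	fmt.Println(hasNoDepNode(acyclic)) // true: "b" can start first
}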
+
+// getAllDependencies is a precursor to starting dependencies.
+// To start a container with all of its dependencies, we need to recursively find all dependencies
+// a container has, as well as each of those containers' dependencies, and so on
+// To do so, keep track of containers already visited (so there aren't redundant state lookups),
+// and recursively search until we have reached the leaves of every dependency node.
+// Since we need to start all dependencies for our original container to successfully start, we propagate any errors
+// from looking up dependencies.
+// Note: this function is currently meant as a robust solution to a narrow problem: start an infra-container when
+// a container in the pod is run. It has not been tested for performance past one level, so expansion of recursive start
+// must be tested first.
+func (c *Container) getAllDependencies(visited map[string]*Container) error {
+ depIDs := c.Dependencies()
+ if len(depIDs) == 0 {
+ return nil
+ }
+ for _, depID := range depIDs {
+ if _, ok := visited[depID]; !ok {
+ dep, err := c.runtime.state.LookupContainer(depID)
+ if err != nil {
+ return err
+ }
+ status, err := dep.State()
+ if err != nil {
+ return err
+ }
+ // if the dependency is already running, we can assume its dependencies are also running
+ // so no need to add them to those we need to start
+ if status != ContainerStateRunning {
+ visited[depID] = dep
+ if err := dep.getAllDependencies(visited); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
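
A minimal, self-contained model of the visited-map recursion above, using plain string IDs instead of *Container and omitting the running-state short-circuit. The visited map both prevents redundant lookups (note the "db" and "cache" diamond sharing "infra") and accumulates the result, exactly as the real function accumulates *Container values.

package main

import "fmt"

// deps maps each container ID to its direct dependency IDs.
var deps = map[string][]string{
	"app":   {"db", "cache"},
	"db":    {"infra"},
	"cache": {"infra"},
	"infra": {},
}

// collect walks the dependency graph depth-first; the visited map
// guarantees each node is examined once even when it is reachable
// along multiple paths.
func collect(id string, visited map[string]bool) {
	for _, dep := range deps[id] {
		if !visited[dep] {
			visited[dep] = true
			collect(dep, visited)
		}
	}
}

func main() {
	visited := make(map[string]bool)
	collect("app", visited)
	fmt.Println(visited) // map[cache:true db:true infra:true]
}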
+
// Check if a container's dependencies are running
// Returns a []string containing the IDs of dependencies that are not running
func (c *Container) checkDependenciesRunning() ([]string, error) {
@@ -630,6 +795,10 @@ func (c *Container) completeNetworkSetup() error {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "init")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// Generate the OCI spec
spec, err := c.generateSpec(ctx)
if err != nil {
@@ -663,6 +832,10 @@ func (c *Container) init(ctx context.Context) error {
// Deletes the container in the runtime, and resets its state to Exited.
// The container can be restarted cleanly after this.
func (c *Container) cleanupRuntime(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanupRuntime")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// If the container is not ContainerStateStopped, do nothing
if c.state.State != ContainerStateStopped {
return nil
@@ -698,6 +871,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// Not necessary for ContainerStateExited - the container has already been
// removed from the runtime, so init() can proceed freely.
func (c *Container) reinit(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "reinit")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.cleanupRuntime(ctx); err != nil {
@@ -930,6 +1107,10 @@ func (c *Container) cleanupStorage() error {
func (c *Container) cleanup(ctx context.Context) error {
var lastError error
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanup")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Cleaning up container %s", c.ID())
// Clean up network namespace, if present
@@ -961,6 +1142,10 @@ func (c *Container) cleanup(ctx context.Context) error {
// delete deletes the container and runs any configured poststop
// hooks.
func (c *Container) delete(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "delete")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if err := c.runtime.ociRuntime.deleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -976,6 +1161,10 @@ func (c *Container) delete(ctx context.Context) (err error) {
// the OCI Runtime Specification (which requires them to run
// post-delete, despite the stage name).
func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if c.state.ExtensionStageHooks != nil {
extensionHooks, ok := c.state.ExtensionStageHooks["poststop"]
if ok {
@@ -1042,113 +1231,6 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
- var uid, gid int
- mountPoint := c.state.Mountpoint
- if !c.state.Mounted {
- return errors.Wrapf(ErrInternal, "container is not mounted")
- }
- newImage, err := c.runtime.imageRuntime.NewFromLocal(c.config.RootfsImageID)
- if err != nil {
- return err
- }
- imageData, err := newImage.Inspect(ctx)
- if err != nil {
- return err
- }
- // Add the built-in volumes of the container passed in to --volumes-from
- for _, vol := range c.config.LocalVolumes {
- if imageData.Config.Volumes == nil {
- imageData.Config.Volumes = map[string]struct{}{
- vol: {},
- }
- } else {
- imageData.Config.Volumes[vol] = struct{}{}
- }
- }
-
- if c.config.User != "" {
- if execUser == nil {
- return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
- }
- uid = execUser.Uid
- gid = execUser.Gid
- }
-
- for k := range imageData.Config.Volumes {
- mount := spec.Mount{
- Destination: k,
- Type: "bind",
- Options: []string{"private", "bind", "rw"},
- }
- if MountExists(g.Mounts(), k) {
- continue
- }
- volumePath := filepath.Join(c.config.StaticDir, "volumes", k)
-
- // Ensure the symlinks are resolved
- resolvedSymlink, err := imagebuildah.ResolveSymLink(mountPoint, k)
- if err != nil {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot resolve %s in %s for container %s", k, mountPoint, c.ID())
- }
- var srcPath string
- if resolvedSymlink != "" {
- srcPath = filepath.Join(mountPoint, resolvedSymlink)
- } else {
- srcPath = filepath.Join(mountPoint, k)
- }
-
- if _, err := os.Stat(srcPath); os.IsNotExist(err) {
- logrus.Infof("Volume image mount point %s does not exist in root FS, need to create it", k)
- if err = os.MkdirAll(srcPath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(srcPath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", srcPath, k, c.ID())
- }
- }
-
- if _, err := os.Stat(volumePath); os.IsNotExist(err) {
- if err = os.MkdirAll(volumePath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(volumePath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = label.Relabel(volumePath, c.config.MountLabel, false); err != nil {
- return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
- if err = chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, k, c.ID(), srcPath)
- }
-
- // Set the volume path with the same owner and permission of source path
- sstat, _ := os.Stat(srcPath)
- st, ok := sstat.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
- uid := int(st.Uid)
- gid := int(st.Gid)
-
- if err := os.Lchown(volumePath, uid, gid); err != nil {
- return err
- }
- if os.Chmod(volumePath, sstat.Mode()); err != nil {
- return err
- }
-
- }
-
- mount.Source = volumePath
- g.AddMount(mount)
- }
- return nil
-}
-
// Save OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
@@ -1292,3 +1374,30 @@ func getExcludedCGroups() (excludes []string) {
excludes = []string{"rdma"}
return
}
+
+// namedVolumes returns the names of the named volumes mounted into the container
+func (c *Container) namedVolumes() ([]string, error) {
+ var volumes []string
+ for _, vol := range c.config.Spec.Mounts {
+ if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
+ volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
+ split := strings.Split(volume, "/")
+ volume = split[0]
+ if _, err := c.runtime.state.Volume(volume); err == nil {
+ volumes = append(volumes, volume)
+ }
+ }
+ }
+ return volumes, nil
+}
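
The prefix-and-split parsing above can be sanity-checked in isolation. The directory below is an assumed example of a VolumePath layout (volume name followed by a data subdirectory), not necessarily libpod's actual default:

package main

import (
	"fmt"
	"strings"
)

func main() {
	volumePath := "/var/lib/containers/storage/volumes"
	source := volumePath + "/myvol/_data"

	// Strip the configured volume directory prefix, then take the
	// first remaining path component as the volume name.
	if strings.HasPrefix(source, volumePath) {
		name := strings.TrimPrefix(source, volumePath+"/")
		name = strings.Split(name, "/")[0]
		fmt.Println(name) // myvol
	}
}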
+
+// TODO: this should use chrootarchive so the copy cannot follow symlinks out of the container's root.
+func (c *Container) copyWithTarFromImage(src, dest string) error {
+ mountpoint, err := c.mount()
+ if err != nil {
+ return err
+ }
+ a := archive.NewDefaultArchiver()
+ source := filepath.Join(mountpoint, src)
+ return a.CopyWithTar(source, dest)
+}