Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state_internal.go | 8
-rw-r--r--  libpod/container.go | 2
-rw-r--r--  libpod/container_api.go | 5
-rw-r--r--  libpod/container_exec.go | 329
-rw-r--r--  libpod/container_inspect.go | 144
-rw-r--r--  libpod/container_internal.go | 29
-rw-r--r--  libpod/container_internal_linux.go | 2
-rw-r--r--  libpod/container_internal_test.go | 2
-rw-r--r--  libpod/define/annotations.go | 68
-rw-r--r--  libpod/define/config.go | 10
-rw-r--r--  libpod/define/container_inspect.go | 7
-rw-r--r--  libpod/define/containerstate.go | 19
-rw-r--r--  libpod/define/healthchecks.go | 36
-rw-r--r--  libpod/define/info.go | 1
-rw-r--r--  libpod/define/version.go | 29
-rw-r--r--  libpod/healthcheck.go | 65
-rw-r--r--  libpod/image/docker_registry_options.go | 2
-rw-r--r--  libpod/image/filters.go | 3
-rw-r--r--  libpod/image/manifests.go | 56
-rw-r--r--  libpod/info.go | 9
-rw-r--r--  libpod/kube.go | 2
-rw-r--r--  libpod/linkmode/linkmode_dynamic.go | 8
-rw-r--r--  libpod/linkmode/linkmode_static.go | 8
-rw-r--r--  libpod/lock/shm/shm_lock_test.go | 1
-rw-r--r--  libpod/oci.go | 26
-rw-r--r--  libpod/oci_conmon_exec_linux.go | 599
-rw-r--r--  libpod/oci_conmon_linux.go | 323
-rw-r--r--  libpod/oci_missing.go | 12
-rw-r--r--  libpod/options.go | 60
-rw-r--r--  libpod/pod.go | 25
-rw-r--r--  libpod/pod_api.go | 20
-rw-r--r--  libpod/runtime.go | 7
-rw-r--r--  libpod/runtime_ctr.go | 24
-rw-r--r--  libpod/runtime_img.go | 2
-rw-r--r--  libpod/runtime_pod_linux.go | 2
-rw-r--r--  libpod/stats.go | 4
-rw-r--r--  libpod/stats_config.go | 20
-rw-r--r--  libpod/stats_unsupported.go | 2
-rw-r--r--  libpod/util.go | 29
-rw-r--r--  libpod/util_test.go | 3
-rw-r--r--  libpod/volume.go | 13
41 files changed, 1377 insertions, 639 deletions
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 33ff0720f..21d55bf77 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -695,7 +695,10 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
}
- ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID())
+ }
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
@@ -890,6 +893,9 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if ctrDepsBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID())
+ }
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
diff --git a/libpod/container.go b/libpod/container.go
index 5cd719ab6..d4a779b13 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1221,5 +1221,5 @@ func (c *Container) AutoRemove() bool {
if spec.Annotations == nil {
return false
}
- return c.Spec().Annotations[InspectAnnotationAutoremove] == InspectResponseTrue
+ return c.Spec().Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index b31079b26..d366ffb84 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -285,6 +285,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
+ logSize := 0
if streamLogs {
// Get all logs for the container
logChan := make(chan *logs.LogLine)
@@ -302,7 +303,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
device := logLine.Device
var header []byte
headerLen := uint32(len(logLine.Msg))
-
+ logSize += len(logLine.Msg)
switch strings.ToLower(device) {
case "stdin":
header = makeHTTPAttachHeader(0, headerLen)
@@ -341,7 +342,7 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
if err := c.ReadLog(logOpts, logChan); err != nil {
return err
}
- logrus.Debugf("Done reading logs for container %s", c.ID())
+ logrus.Debugf("Done reading logs for container %s, %d bytes", c.ID(), logSize)
if err := <-errChan; err != nil {
return err
}
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index c1ce8b724..f2943b73c 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -1,7 +1,9 @@
package libpod
import (
+ "bufio"
"io/ioutil"
+ "net"
"os"
"path/filepath"
"strconv"
@@ -60,6 +62,13 @@ type ExecConfig struct {
// given is the number that will be passed into the exec session,
// starting at 3.
PreserveFDs uint `json:"preserveFds,omitempty"`
+ // ExitCommand is the exec session's exit command.
+ // This command will be executed when the exec session exits.
+ // If unset, no command will be executed.
+ // Two arguments will be appended to the exit command by Libpod:
+ // The ID of the exec session, and the ID of the container the exec
+ // session is a part of (in that order).
+ ExitCommand []string `json:"exitCommand,omitempty"`
}
// ExecSession contains information on a single exec session attached to a given
@@ -102,7 +111,7 @@ func (e *ExecSession) Inspect() (*define.InspectExecSession, error) {
}
output := new(define.InspectExecSession)
- output.CanRemove = e.State != define.ExecStateRunning
+ output.CanRemove = e.State == define.ExecStateStopped
output.ContainerID = e.ContainerId
if e.Config.DetachKeys != nil {
output.DetachKeys = *e.Config.DetachKeys
@@ -156,9 +165,6 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
if len(config.Command) == 0 {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
}
- if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) {
- return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal")
- }
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
@@ -192,6 +198,10 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
}
+ if len(session.Config.ExitCommand) > 0 {
+ session.Config.ExitCommand = append(session.Config.ExitCommand, []string{session.ID(), c.ID()}...)
+ }
+
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
@@ -211,11 +221,52 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
}
// ExecStart starts an exec session in the container, but does not attach to it.
-// Returns immediately upon starting the exec session.
+// Returns immediately upon starting the exec session, unlike other ExecStart
+// functions, which will only return when the exec session exits.
func (c *Container) ExecStart(sessionID string) error {
- // Will be implemented in part 2, migrating Start and implementing
- // detached Start.
- return define.ErrNotImplemented
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ }
+
+ // Verify that we are in a good state to continue
+ if !c.ensureState(define.ContainerStateRunning) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ }
+
+ session, ok := c.state.ExecSessions[sessionID]
+ if !ok {
+ return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ }
+
+ if session.State != define.ExecStateCreated {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ }
+
+ logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
+
+ opts, err := prepareForExec(c, session)
+ if err != nil {
+ return err
+ }
+
+ pid, err := c.ociRuntime.ExecContainerDetached(c, session.ID(), opts, session.Config.AttachStdin)
+ if err != nil {
+ return err
+ }
+
+ c.newContainerEvent(events.Exec)
+ logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
+
+ // Update and save session to reflect PID/running
+ session.PID = pid
+ session.State = define.ExecStateRunning
+
+ return c.save()
}
// ExecStartAndAttach starts and attaches to an exec session in a container.
@@ -247,34 +298,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
- // TODO: check logic here - should we set Privileged if the container is
- // privileged?
- var capList []string
- if session.Config.Privileged || c.config.Privileged {
- capList = capabilities.AllCapabilities()
- }
-
- user := c.config.User
- if session.Config.User != "" {
- user = session.Config.User
- }
-
- if err := c.createExecBundle(session.ID()); err != nil {
+ opts, err := prepareForExec(c, session)
+ if err != nil {
return err
}
- opts := new(ExecOptions)
- opts.Cmd = session.Config.Command
- opts.CapAdd = capList
- opts.Env = session.Config.Environment
- opts.Terminal = session.Config.Terminal
- opts.Cwd = session.Config.WorkDir
- opts.User = user
- opts.Streams = streams
- opts.PreserveFDs = session.Config.PreserveFDs
- opts.DetachKeys = session.Config.DetachKeys
-
- pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts)
+ pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts, streams)
if err != nil {
return err
}
@@ -318,28 +347,124 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
c.lock.Lock()
}
- // Sync the container to pick up state changes
- if err := c.syncContainer(); err != nil {
+ if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID())
+ lastErr = err
+ }
+
+ // Clean up after ourselves
+ if err := c.cleanupExecBundle(session.ID()); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = err
+ }
+
+ return lastErr
+}
+
+// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
+func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) {
+ // TODO: How do we combine streams with the default streams set in the exec session?
+
+ // The flow here is somewhat strange, because we need to determine if
+ // there's a terminal ASAP (for error handling).
+ // Until we know, assume it's true (don't add standard stream headers).
+ // Add a defer to ensure our invariant (HTTP session is closed) is
+ // maintained.
+ isTerminal := true
+ defer func() {
+ hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
+ }()
+
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
}
- // Update status
- // Since we did a syncContainer, the old session has been overwritten.
- // Grab a fresh one from the database.
- session, ok = c.state.ExecSessions[sessionID]
+ session, ok := c.state.ExecSessions[sessionID]
if !ok {
- // Exec session already removed.
- logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID)
- return nil
+ return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
- session.State = define.ExecStateStopped
- session.ExitCode = exitCode
- session.PID = 0
+ // We can now finally get the real value of isTerminal.
+ isTerminal = session.Config.Terminal
+
+ // Verify that we are in a good state to continue
+ if !c.ensureState(define.ContainerStateRunning) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ }
+
+ if session.State != define.ExecStateCreated {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ }
+
+ logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
+
+ execOpts, err := prepareForExec(c, session)
+ if err != nil {
+ return err
+ }
+
+ if streams == nil {
+ streams = new(HTTPAttachStreams)
+ streams.Stdin = session.Config.AttachStdin
+ streams.Stdout = session.Config.AttachStdout
+ streams.Stderr = session.Config.AttachStderr
+ }
+
+ pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, httpCon, httpBuf, streams, cancel)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Investigate whether more of this can be made common with
+ // ExecStartAndAttach
+
+ c.newContainerEvent(events.Exec)
+ logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
+
+ var lastErr error
+
+ session.PID = pid
+ session.State = define.ExecStateRunning
if err := c.save(); err != nil {
+ lastErr = err
+ }
+
+ // Unlock so other processes can use the container
+ if !c.batched {
+ c.lock.Unlock()
+ }
+
+ tmpErr := <-attachChan
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = tmpErr
+
+ exitCode, err := c.readExecExitCode(session.ID())
+ if err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
+ }
+ lastErr = err
+ }
+
+ logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
+
+ // Lock again
+ if !c.batched {
+ c.lock.Lock()
+ }
+
+ if err := writeExecExitCode(c, session.ID(), exitCode); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
@@ -357,12 +482,6 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
return lastErr
}
-// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
-func (c *Container) ExecHTTPStartAndAttach(sessionID string) error {
- // Will be implemented in part 2, migrating Start.
- return define.ErrNotImplemented
-}
-
// ExecStop stops an exec session in the container.
// If a timeout is provided, it will be used; otherwise, the timeout will
// default to the stop timeout of the container.
@@ -444,7 +563,27 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if session.State == define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ // Check if the exec session is still running.
+ alive, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
+ if err != nil {
+ return err
+ }
+
+ if alive {
+ return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ }
+
+ exitCode, err := c.readExecExitCode(session.ID())
+ if err != nil {
+ return err
+ }
+ session.ExitCode = exitCode
+ session.PID = 0
+ session.State = define.ExecStateStopped
+
+ if err := c.save(); err != nil {
+ return err
+ }
}
logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID())
@@ -474,11 +613,11 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// Update status of exec session if running, so we cna check if it
// stopped in the meantime.
if session.State == define.ExecStateRunning {
- stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
+ running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil {
return err
}
- if stopped {
+ if !running {
session.State = define.ExecStateStopped
// TODO: should we retrieve exit code here?
// TODO: Might be worth saving state here.
@@ -733,13 +872,6 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
continue
}
if !alive {
- if err := c.cleanupExecBundle(id); err != nil {
- if lastErr != nil {
- logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
- }
- lastErr = err
- }
-
_, isLegacy := c.state.LegacyExecSessions[id]
if isLegacy {
delete(c.state.LegacyExecSessions, id)
@@ -759,6 +891,12 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
needSave = true
}
+ if err := c.cleanupExecBundle(id); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
+ }
+ lastErr = err
+ }
} else {
activeSessions = append(activeSessions, id)
}
@@ -779,6 +917,8 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
func (c *Container) removeAllExecSessions() error {
knownSessions := c.getKnownExecSessions()
+ logrus.Debugf("Removing all exec sessions for container %s", c.ID())
+
var lastErr error
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
@@ -814,3 +954,68 @@ func (c *Container) removeAllExecSessions() error {
return lastErr
}
+
+// Make an ExecOptions struct to start the OCI runtime and prepare its exec
+// bundle.
+func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
+ // TODO: check logic here - should we set Privileged if the container is
+ // privileged?
+ var capList []string
+ if session.Config.Privileged || c.config.Privileged {
+ capList = capabilities.AllCapabilities()
+ }
+
+ user := c.config.User
+ if session.Config.User != "" {
+ user = session.Config.User
+ }
+
+ if err := c.createExecBundle(session.ID()); err != nil {
+ return nil, err
+ }
+
+ opts := new(ExecOptions)
+ opts.Cmd = session.Config.Command
+ opts.CapAdd = capList
+ opts.Env = session.Config.Environment
+ opts.Terminal = session.Config.Terminal
+ opts.Cwd = session.Config.WorkDir
+ opts.User = user
+ opts.PreserveFDs = session.Config.PreserveFDs
+ opts.DetachKeys = session.Config.DetachKeys
+ opts.ExitCommand = session.Config.ExitCommand
+
+ return opts, nil
+}
+
+// Write an exec session's exit code to the database
+func writeExecExitCode(c *Container, sessionID string, exitCode int) error {
+ // We can't reuse the old exec session (things may have changed from
+ // under use, the container was unlocked).
+ // So re-sync and get a fresh copy.
+ // If we can't do this, no point in continuing, any attempt to save
+ // would write garbage to the DB.
+ if err := c.syncContainer(); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ // Container's entirely removed. We can't save status,
+ // but the container's entirely removed, so we don't
+ // need to. Exit without error.
+ return nil
+ }
+ return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ }
+
+ session, ok := c.state.ExecSessions[sessionID]
+ if !ok {
+ // Exec session already removed.
+ logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID)
+ return nil
+ }
+
+ session.State = define.ExecStateStopped
+ session.ExitCode = exitCode
+ session.PID = 0
+
+ // Finally, save our changes.
+ return c.save()
+}
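
The container_exec.go changes above split exec startup into a reusable prepareForExec helper and add a detached start path (ExecCreate followed by ExecStart). A minimal sketch of how a libpod consumer might drive that path; the container handle "ctr" and the cleanup binary path are assumptions for illustration, not part of this diff:

    package execexample

    import "github.com/containers/libpod/libpod"

    // startDetachedExec creates an exec session and starts it without
    // attaching; ExecStart returns as soon as the session is running.
    func startDetachedExec(ctr *libpod.Container) (string, error) {
            cfg := new(libpod.ExecConfig)
            cfg.Command = []string{"/bin/sh", "-c", "echo hello"}
            // Libpod appends the session ID and container ID to ExitCommand
            // when the session is created (see ExecCreate above).
            cfg.ExitCommand = []string{"/usr/local/bin/exec-cleanup"} // hypothetical helper

            sessionID, err := ctr.ExecCreate(cfg)
            if err != nil {
                    return "", err
            }
            if err := ctr.ExecStart(sessionID); err != nil {
                    return "", err
            }
            return sessionID, nil
    }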
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 729a00be8..b26dcddf6 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -16,73 +16,6 @@ import (
"github.com/syndtr/gocapability/capability"
)
-const (
- // InspectAnnotationCIDFile is used by Inspect to determine if a
- // container ID file was created for the container.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationCIDFile = "io.podman.annotations.cid-file"
- // InspectAnnotationAutoremove is used by Inspect to determine if a
- // container will be automatically removed on exit.
- // If an annotation with this key is found in the OCI spec and is one of
- // the two supported boolean values (InspectResponseTrue and
- // InspectResponseFalse) it will be used in the output of Inspect().
- InspectAnnotationAutoremove = "io.podman.annotations.autoremove"
- // InspectAnnotationVolumesFrom is used by Inspect to identify
- // containers whose volumes are are being used by this container.
- // It is expected to be a comma-separated list of container names and/or
- // IDs.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from"
- // InspectAnnotationPrivileged is used by Inspect to identify containers
- // which are privileged (IE, running with elevated privileges).
- // It is expected to be a boolean, populated by one of
- // InspectResponseTrue or InspectResponseFalse.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationPrivileged = "io.podman.annotations.privileged"
- // InspectAnnotationPublishAll is used by Inspect to identify containers
- // which have all the ports from their image published.
- // It is expected to be a boolean, populated by one of
- // InspectResponseTrue or InspectResponseFalse.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationPublishAll = "io.podman.annotations.publish-all"
- // InspectAnnotationInit is used by Inspect to identify containers that
- // mount an init binary in.
- // It is expected to be a boolean, populated by one of
- // InspectResponseTrue or InspectResponseFalse.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationInit = "io.podman.annotations.init"
- // InspectAnnotationLabel is used by Inspect to identify containers with
- // special SELinux-related settings. It is used to populate the output
- // of the SecurityOpt setting.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationLabel = "io.podman.annotations.label"
- // InspectAnnotationSeccomp is used by Inspect to identify containers
- // with special Seccomp-related settings. It is used to populate the
- // output of the SecurityOpt setting in Inspect.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationSeccomp = "io.podman.annotations.seccomp"
- // InspectAnnotationApparmor is used by Inspect to identify containers
- // with special Apparmor-related settings. It is used to populate the
- // output of the SecurityOpt setting.
- // If an annotation with this key is found in the OCI spec, it will be
- // used in the output of Inspect().
- InspectAnnotationApparmor = "io.podman.annotations.apparmor"
-
- // InspectResponseTrue is a boolean True response for an inspect
- // annotation.
- InspectResponseTrue = "TRUE"
- // InspectResponseFalse is a boolean False response for an inspect
- // annotation.
- InspectResponseFalse = "FALSE"
-)
-
// inspectLocked inspects a container for low-level information.
// The caller must held c.lock.
func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) {
@@ -452,26 +385,26 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
// Annotations
if ctrSpec.Annotations != nil {
- hostConfig.ContainerIDFile = ctrSpec.Annotations[InspectAnnotationCIDFile]
- if ctrSpec.Annotations[InspectAnnotationAutoremove] == InspectResponseTrue {
+ hostConfig.ContainerIDFile = ctrSpec.Annotations[define.InspectAnnotationCIDFile]
+ if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue {
hostConfig.AutoRemove = true
}
- if ctrs, ok := ctrSpec.Annotations[InspectAnnotationVolumesFrom]; ok {
+ if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok {
hostConfig.VolumesFrom = strings.Split(ctrs, ",")
}
- if ctrSpec.Annotations[InspectAnnotationPrivileged] == InspectResponseTrue {
+ if ctrSpec.Annotations[define.InspectAnnotationPrivileged] == define.InspectResponseTrue {
hostConfig.Privileged = true
}
- if ctrSpec.Annotations[InspectAnnotationInit] == InspectResponseTrue {
+ if ctrSpec.Annotations[define.InspectAnnotationInit] == define.InspectResponseTrue {
hostConfig.Init = true
}
- if label, ok := ctrSpec.Annotations[InspectAnnotationLabel]; ok {
+ if label, ok := ctrSpec.Annotations[define.InspectAnnotationLabel]; ok {
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("label=%s", label))
}
- if seccomp, ok := ctrSpec.Annotations[InspectAnnotationSeccomp]; ok {
+ if seccomp, ok := ctrSpec.Annotations[define.InspectAnnotationSeccomp]; ok {
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp))
}
- if apparmor, ok := ctrSpec.Annotations[InspectAnnotationApparmor]; ok {
+ if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok {
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor))
}
}
@@ -647,7 +580,10 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
networkMode := ""
switch {
case c.config.CreateNetNS:
- networkMode = "default"
+ // We actually store the network
+ // mode for Slirp and Bridge, so
+ // we can just use that
+ networkMode = string(c.config.NetMode)
case c.config.NetNsCtr != "":
networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
default:
@@ -661,6 +597,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
if ns.Path != "" {
networkMode = fmt.Sprintf("ns:%s", ns.Path)
} else {
+ // We're making a network ns, but not
+ // configuring with Slirp or CNI. That
+ // means it's --net=none
networkMode = "none"
}
break
@@ -743,27 +682,52 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
ipcMode := ""
if c.config.IPCNsCtr != "" {
ipcMode = fmt.Sprintf("container:%s", c.config.IPCNsCtr)
- } else {
+ } else if ctrSpec.Linux != nil {
// Locate the spec's IPC namespace.
// If there is none, it's ipc=host.
// If there is one and it has a path, it's "ns:".
// If no path, it's default - the empty string.
- foundIPCNS := false
+
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == spec.IPCNamespace {
- foundIPCNS = true
if ns.Path != "" {
ipcMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ ipcMode = "private"
}
break
}
}
- if !foundIPCNS {
+ if ipcMode == "" {
ipcMode = "host"
}
}
hostConfig.IpcMode = ipcMode
+ // Cgroup namespace mode
+ cgroupMode := ""
+ if c.config.CgroupNsCtr != "" {
+ cgroupMode = fmt.Sprintf("container:%s", c.config.CgroupNsCtr)
+ } else if ctrSpec.Linux != nil {
+ // Locate the spec's cgroup namespace
+ // If there is none, it's cgroup=host.
+ // If there is one and it has a path, it's "ns:".
+ // If there is no path, it's private.
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.CgroupNamespace {
+ if ns.Path != "" {
+ cgroupMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ cgroupMode = "private"
+ }
+ }
+ }
+ if cgroupMode == "" {
+ cgroupMode = "host"
+ }
+ }
+ hostConfig.CgroupMode = cgroupMode
+
// CGroup parent
// Need to check if it's the default, and not print if so.
defaultCgroupParent := ""
@@ -781,22 +745,22 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
pidMode := ""
if c.config.PIDNsCtr != "" {
pidMode = fmt.Sprintf("container:%s", c.config.PIDNsCtr)
- } else {
+ } else if ctrSpec.Linux != nil {
// Locate the spec's PID namespace.
// If there is none, it's pid=host.
// If there is one and it has a path, it's "ns:".
// If there is no path, it's default - the empty string.
- foundPIDNS := false
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == spec.PIDNamespace {
- foundPIDNS = true
if ns.Path != "" {
pidMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ pidMode = "private"
}
break
}
}
- if !foundPIDNS {
+ if pidMode == "" {
pidMode = "host"
}
}
@@ -806,22 +770,23 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
utsMode := ""
if c.config.UTSNsCtr != "" {
utsMode = fmt.Sprintf("container:%s", c.config.UTSNsCtr)
- } else {
+ } else if ctrSpec.Linux != nil {
+
// Locate the spec's UTS namespace.
// If there is none, it's uts=host.
// If there is one and it has a path, it's "ns:".
// If there is no path, it's default - the empty string.
- foundUTSNS := false
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == spec.UTSNamespace {
- foundUTSNS = true
if ns.Path != "" {
utsMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ utsMode = "private"
}
break
}
}
- if !foundUTSNS {
+ if utsMode == "" {
utsMode = "host"
}
}
@@ -831,11 +796,12 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
usernsMode := ""
if c.config.UserNsCtr != "" {
usernsMode = fmt.Sprintf("container:%s", c.config.UserNsCtr)
- } else {
+ } else if ctrSpec.Linux != nil {
// Locate the spec's user namespace.
// If there is none, it's default - the empty string.
// If there is one, it's "private" if no path, or "ns:" if
// there's a path.
+
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == spec.UserNamespace {
if ns.Path != "" {
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 3fcf687ec..43e873bd6 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1011,6 +1011,14 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
logrus.Debugf("Created container %s in OCI runtime", c.ID())
+ // Remove any exec sessions leftover from a potential prior run.
+ if len(c.state.ExecSessions) > 0 {
+ if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
+ logrus.Errorf("Error removing container %s exec sessions from DB: %v", c.ID(), err)
+ }
+ c.state.ExecSessions = make(map[string]*ExecSession)
+ }
+
c.state.ExitCode = 0
c.state.Exited = false
c.state.State = define.ContainerStateCreated
@@ -1161,7 +1169,7 @@ func (c *Container) start() error {
c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
- if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
+ if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil {
logrus.Error(err)
}
if err := c.startTimer(); err != nil {
@@ -1562,21 +1570,24 @@ func (c *Container) cleanup(ctx context.Context) error {
lastError = errors.Wrapf(err, "error removing container %s network", c.ID())
}
- // Unmount storage
- if err := c.cleanupStorage(); err != nil {
+ // Remove the container from the runtime, if necessary.
+ // Do this *before* unmounting storage - some runtimes (e.g. Kata)
+ // apparently object to having storage removed while the container still
+ // exists.
+ if err := c.cleanupRuntime(ctx); err != nil {
if lastError != nil {
- logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err)
+ logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err)
} else {
- lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
+ lastError = err
}
}
- // Remove the container from the runtime, if necessary
- if err := c.cleanupRuntime(ctx); err != nil {
+ // Unmount storage
+ if err := c.cleanupStorage(); err != nil {
if lastError != nil {
- logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err)
+ logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err)
} else {
- lastError = err
+ lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
}
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 8ee0fb456..2bd6099f0 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1236,7 +1236,7 @@ func (c *Container) makeBindMounts() error {
}
// Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false)
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.Mountpoint, c.RootUID(), c.RootGID(), rootless.IsRootless(), false)
for _, mount := range secretMounts {
if _, ok := c.state.BindMounts[mount.Destination]; !ok {
c.state.BindMounts[mount.Destination] = mount.Source
diff --git a/libpod/container_internal_test.go b/libpod/container_internal_test.go
index 5428504ef..fdf7c2e20 100644
--- a/libpod/container_internal_test.go
+++ b/libpod/container_internal_test.go
@@ -60,7 +60,7 @@ func TestPostDeleteHooks(t *testing.T) {
t.Fatal(err)
}
- stateRegexp := `{"ociVersion":"1\.0\.1-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}`
+ stateRegexp := `{"ociVersion":"1\.0\.2-dev","id":"123abc","status":"stopped","bundle":"` + strings.TrimSuffix(os.TempDir(), "/") + `/libpod_test_[0-9]*","annotations":{"a":"b"}}`
for _, p := range []string{statePath, copyPath} {
path := p
t.Run(path, func(t *testing.T) {
diff --git a/libpod/define/annotations.go b/libpod/define/annotations.go
new file mode 100644
index 000000000..f6b1c06ea
--- /dev/null
+++ b/libpod/define/annotations.go
@@ -0,0 +1,68 @@
+package define
+
+const (
+ // InspectAnnotationCIDFile is used by Inspect to determine if a
+ // container ID file was created for the container.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationCIDFile = "io.podman.annotations.cid-file"
+ // InspectAnnotationAutoremove is used by Inspect to determine if a
+ // container will be automatically removed on exit.
+ // If an annotation with this key is found in the OCI spec and is one of
+ // the two supported boolean values (InspectResponseTrue and
+ // InspectResponseFalse) it will be used in the output of Inspect().
+ InspectAnnotationAutoremove = "io.podman.annotations.autoremove"
+ // InspectAnnotationVolumesFrom is used by Inspect to identify
+ // containers whose volumes are are being used by this container.
+ // It is expected to be a comma-separated list of container names and/or
+ // IDs.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from"
+ // InspectAnnotationPrivileged is used by Inspect to identify containers
+ // which are privileged (IE, running with elevated privileges).
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationPrivileged = "io.podman.annotations.privileged"
+ // InspectAnnotationPublishAll is used by Inspect to identify containers
+ // which have all the ports from their image published.
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationPublishAll = "io.podman.annotations.publish-all"
+ // InspectAnnotationInit is used by Inspect to identify containers that
+ // mount an init binary in.
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationInit = "io.podman.annotations.init"
+ // InspectAnnotationLabel is used by Inspect to identify containers with
+ // special SELinux-related settings. It is used to populate the output
+ // of the SecurityOpt setting.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationLabel = "io.podman.annotations.label"
+ // InspectAnnotationSeccomp is used by Inspect to identify containers
+ // with special Seccomp-related settings. It is used to populate the
+ // output of the SecurityOpt setting in Inspect.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationSeccomp = "io.podman.annotations.seccomp"
+ // InspectAnnotationApparmor is used by Inspect to identify containers
+ // with special Apparmor-related settings. It is used to populate the
+ // output of the SecurityOpt setting.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationApparmor = "io.podman.annotations.apparmor"
+
+ // InspectResponseTrue is a boolean True response for an inspect
+ // annotation.
+ InspectResponseTrue = "TRUE"
+ // InspectResponseFalse is a boolean False response for an inspect
+ // annotation.
+ InspectResponseFalse = "FALSE"
+)
diff --git a/libpod/define/config.go b/libpod/define/config.go
index 692eafb04..5ca4da4af 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -3,6 +3,9 @@ package define
import (
"bufio"
"io"
+ "regexp"
+
+ "github.com/pkg/errors"
)
var (
@@ -10,6 +13,13 @@ var (
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+ // NameRegex is a regular expression to validate container/pod names.
+ // This must NOT be changed from outside of Libpod. It should be a
+ // constant, but Go won't let us do that.
+ NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
+ // RegexError is thrown in presence of an invalid container/pod name.
+ RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
)
const (
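
The NameRegex and RegexError values added above give callers a single place to validate container and pod names. A brief sketch of the typical check; the validateName helper is hypothetical:

    package nameexample

    import "github.com/containers/libpod/libpod/define"

    // validateName rejects names that do not match Libpod's naming rules.
    func validateName(name string) error {
            if !define.NameRegex.MatchString(name) {
                    return define.RegexError
            }
            return nil
    }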
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index e6a19e5b4..27ada8706 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -228,6 +228,13 @@ type InspectContainerHostConfig struct {
// include a Mounts field in inspect.
// Format: <src>:<destination>[:<comma-separated options>]
Binds []string `json:"Binds"`
+ // CgroupMode is the configuration of the container's cgroup namespace.
+ // Populated as follows:
+ // private - a cgroup namespace has been created
+ // host - No cgroup namespace created
+ // container:<id> - Using another container's cgroup namespace
+ // ns:<path> - A path to a cgroup namespace has been specified
+ CgroupMode string `json:"CgroupMode"`
// ContainerIDFile is a file created during container creation to hold
// the ID of the created container.
// This is not handled within libpod and is stored in an annotation.
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 6da49a594..825e77387 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -112,3 +112,22 @@ func (s ContainerExecStatus) String() string {
return "bad state"
}
}
+
+// ContainerStats contains the statistics information for a running container
+type ContainerStats struct {
+ ContainerID string
+ Name string
+ PerCPU []uint64
+ CPU float64
+ CPUNano uint64
+ CPUSystemNano uint64
+ SystemNano uint64
+ MemUsage uint64
+ MemLimit uint64
+ MemPerc float64
+ NetInput uint64
+ NetOutput uint64
+ BlockInput uint64
+ BlockOutput uint64
+ PIDs uint64
+}
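
ContainerStats carries raw counters plus the derived MemPerc field. Assuming MemPerc is the usage-to-limit ratio expressed as a percentage (a reasonable reading of the field names, not stated in this diff), it can be recomputed from the raw values:

    package statsexample

    import "github.com/containers/libpod/libpod/define"

    // memPercent recomputes the memory percentage from raw usage and limit.
    func memPercent(s define.ContainerStats) float64 {
            if s.MemLimit == 0 {
                    return 0
            }
            return float64(s.MemUsage) / float64(s.MemLimit) * 100
    }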
diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go
new file mode 100644
index 000000000..4114262b6
--- /dev/null
+++ b/libpod/define/healthchecks.go
@@ -0,0 +1,36 @@
+package define
+
+const (
+ // HealthCheckHealthy describes a healthy container
+ HealthCheckHealthy string = "healthy"
+ // HealthCheckUnhealthy describes an unhealthy container
+ HealthCheckUnhealthy string = "unhealthy"
+ // HealthCheckStarting describes the time between when the container starts
+ // and the start-period (time allowed for the container to start and application
+ // to be running) expires.
+ HealthCheckStarting string = "starting"
+)
+
+// HealthCheckStatus represents the current state of a container
+type HealthCheckStatus int
+
+const (
+ // HealthCheckSuccess means the health worked
+ HealthCheckSuccess HealthCheckStatus = iota
+ // HealthCheckFailure means the health ran and failed
+ HealthCheckFailure HealthCheckStatus = iota
+ // HealthCheckContainerStopped means the health check cannot
+ // be run because the container is stopped
+ HealthCheckContainerStopped HealthCheckStatus = iota
+ // HealthCheckContainerNotFound means the container could
+ // not be found in local store
+ HealthCheckContainerNotFound HealthCheckStatus = iota
+ // HealthCheckNotDefined means the container has no health
+ // check defined in it
+ HealthCheckNotDefined HealthCheckStatus = iota
+ // HealthCheckInternalError means some something failed obtaining or running
+ // a given health check
+ HealthCheckInternalError HealthCheckStatus = iota
+ // HealthCheckDefined means the healthcheck was found on the container
+ HealthCheckDefined HealthCheckStatus = iota
+)
diff --git a/libpod/define/info.go b/libpod/define/info.go
index 2516cad77..906aa523f 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -33,6 +33,7 @@ type HostInfo struct {
SwapFree int64 `json:"swapFree"`
SwapTotal int64 `json:"swapTotal"`
Uptime string `json:"uptime"`
+ Linkmode string `json:"linkmode"`
}
// SlirpInfo describes the slirp exectuable that
diff --git a/libpod/define/version.go b/libpod/define/version.go
index 0f9f49050..3eb016264 100644
--- a/libpod/define/version.go
+++ b/libpod/define/version.go
@@ -3,6 +3,7 @@ package define
import (
"runtime"
"strconv"
+ "time"
podmanVersion "github.com/containers/libpod/version"
)
@@ -17,14 +18,15 @@ var (
buildInfo string
)
-//Version is an output struct for varlink
+// Version is an output struct for varlink
type Version struct {
- RemoteAPIVersion int64
- Version string
- GoVersion string
- GitCommit string
- Built int64
- OsArch string
+ APIVersion int64
+ Version string
+ GoVersion string
+ GitCommit string
+ BuiltTime string
+ Built int64
+ OsArch string
}
// GetVersion returns a VersionOutput struct for varlink and podman
@@ -40,11 +42,12 @@ func GetVersion() (Version, error) {
}
}
return Version{
- RemoteAPIVersion: podmanVersion.RemoteAPIVersion,
- Version: podmanVersion.Version,
- GoVersion: runtime.Version(),
- GitCommit: gitCommit,
- Built: buildTime,
- OsArch: runtime.GOOS + "/" + runtime.GOARCH,
+ APIVersion: podmanVersion.APIVersion,
+ Version: podmanVersion.Version,
+ GoVersion: runtime.Version(),
+ GitCommit: gitCommit,
+ BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC),
+ Built: buildTime,
+ OsArch: runtime.GOOS + "/" + runtime.GOARCH,
}, nil
}
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index aec5fa4e0..0006b7c06 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -14,43 +14,12 @@ import (
"github.com/sirupsen/logrus"
)
-// HealthCheckStatus represents the current state of a container
-type HealthCheckStatus int
-
const (
- // HealthCheckSuccess means the health worked
- HealthCheckSuccess HealthCheckStatus = iota
- // HealthCheckFailure means the health ran and failed
- HealthCheckFailure HealthCheckStatus = iota
- // HealthCheckContainerStopped means the health check cannot
- // be run because the container is stopped
- HealthCheckContainerStopped HealthCheckStatus = iota
- // HealthCheckContainerNotFound means the container could
- // not be found in local store
- HealthCheckContainerNotFound HealthCheckStatus = iota
- // HealthCheckNotDefined means the container has no health
- // check defined in it
- HealthCheckNotDefined HealthCheckStatus = iota
- // HealthCheckInternalError means some something failed obtaining or running
- // a given health check
- HealthCheckInternalError HealthCheckStatus = iota
- // HealthCheckDefined means the healthcheck was found on the container
- HealthCheckDefined HealthCheckStatus = iota
-
// MaxHealthCheckNumberLogs is the maximum number of attempts we keep
// in the healthcheck history file
MaxHealthCheckNumberLogs int = 5
// MaxHealthCheckLogLength in characters
MaxHealthCheckLogLength = 500
-
- // HealthCheckHealthy describes a healthy container
- HealthCheckHealthy string = "healthy"
- // HealthCheckUnhealthy describes an unhealthy container
- HealthCheckUnhealthy string = "unhealthy"
- // HealthCheckStarting describes the time between when the container starts
- // and the start-period (time allowed for the container to start and application
- // to be running) expires.
- HealthCheckStarting string = "starting"
)
// hcWriteCloser allows us to use bufio as a WriteCloser
@@ -65,10 +34,10 @@ func (hcwc hcWriteCloser) Close() error {
// HealthCheck verifies the state and validity of the healthcheck configuration
// on the container and then executes the healthcheck
-func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
+func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
- return HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
+ return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
@@ -78,7 +47,7 @@ func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
}
// runHealthCheck runs the health check as defined by the container
-func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
+func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
var (
newCommand []string
returnCode int
@@ -87,11 +56,11 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
case "", "NONE":
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
case "CMD":
newCommand = hcCommand[1:]
case "CMD-SHELL":
@@ -102,7 +71,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
captureBuffer := bufio.NewWriter(&capture)
hcw := hcWriteCloser{
@@ -120,13 +89,13 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
- hcResult := HealthCheckSuccess
+ hcResult := define.HealthCheckSuccess
config := new(ExecConfig)
config.Command = newCommand
_, hcErr := c.Exec(config, streams, nil)
if hcErr != nil {
errCause := errors.Cause(hcErr)
- hcResult = HealthCheckFailure
+ hcResult = define.HealthCheckFailure
if errCause == define.ErrOCIRuntimeNotFound ||
errCause == define.ErrOCIRuntimePermissionDenied ||
errCause == define.ErrOCIRuntime {
@@ -154,7 +123,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
- hcResult = HealthCheckFailure
+ hcResult = define.HealthCheckFailure
hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
@@ -164,18 +133,18 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
return hcResult, hcErr
}
-func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
+func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
cstate, err := c.State()
if err != nil {
- return HealthCheckInternalError, err
+ return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
- return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
+ return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
- return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
- return HealthCheckDefined, nil
+ return define.HealthCheckDefined, nil
}
func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.HealthCheckLog {
@@ -210,18 +179,18 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio
}
if hcl.ExitCode == 0 {
// set status to healthy, reset failing state to 0
- healthCheck.Status = HealthCheckHealthy
+ healthCheck.Status = define.HealthCheckHealthy
healthCheck.FailingStreak = 0
} else {
if len(healthCheck.Status) < 1 {
- healthCheck.Status = HealthCheckHealthy
+ healthCheck.Status = define.HealthCheckHealthy
}
if !inStartPeriod {
// increment failing streak
healthCheck.FailingStreak += 1
// if failing streak > retries, then status to unhealthy
if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
- healthCheck.Status = HealthCheckUnhealthy
+ healthCheck.Status = define.HealthCheckUnhealthy
}
}
}
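
With the healthcheck status constants moved into the define package, callers now compare against define.HealthCheck* values. A small sketch of invoking a healthcheck through the Runtime, assuming an existing *libpod.Runtime and a container name; the runHealthCheck wrapper is hypothetical:

    package hcexample

    import (
            "fmt"

            "github.com/containers/libpod/libpod"
            "github.com/containers/libpod/libpod/define"
    )

    // runHealthCheck executes the container's healthcheck and reports success.
    func runHealthCheck(r *libpod.Runtime, name string) (bool, error) {
            status, err := r.HealthCheck(name)
            if err != nil {
                    return false, err
            }
            if status == define.HealthCheckSuccess {
                    return true, nil
            }
            return false, fmt.Errorf("healthcheck returned status %d", status)
    }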
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 01b5558af..081e7ef4f 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -30,6 +30,8 @@ type DockerRegistryOptions struct {
OSChoice string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
+ // RegistriesConfPath can be used to override the default path of registries.conf.
+ RegistriesConfPath string
}
// GetSystemContext constructs a new system context from a parent context. the values in the DockerRegistryOptions, and other parameters.
diff --git a/libpod/image/filters.go b/libpod/image/filters.go
index 8ca3526a0..747eba165 100644
--- a/libpod/image/filters.go
+++ b/libpod/image/filters.go
@@ -170,8 +170,7 @@ func (ir *Runtime) createFilterFuncs(filters []string, img *Image) ([]ResultFilt
labelFilter := strings.Join(splitFilter[1:], "=")
filterFuncs = append(filterFuncs, LabelFilter(ctx, labelFilter))
case "reference":
- referenceFilter := strings.Join(splitFilter[1:], "=")
- filterFuncs = append(filterFuncs, ReferenceFilter(ctx, referenceFilter))
+ filterFuncs = append(filterFuncs, ReferenceFilter(ctx, splitFilter[1]))
case "id":
filterFuncs = append(filterFuncs, IdFilter(splitFilter[1]))
default:
diff --git a/libpod/image/manifests.go b/libpod/image/manifests.go
index 7ca17f86c..59678fdb2 100644
--- a/libpod/image/manifests.go
+++ b/libpod/image/manifests.go
@@ -24,6 +24,18 @@ type ManifestAddOpts struct {
Variant string `json:"variant"`
}
+// ManifestAnnotateOptions defines the options for
+// manifest annotate
+type ManifestAnnotateOpts struct {
+ Annotation map[string]string `json:"annotation"`
+ Arch string `json:"arch"`
+ Features []string `json:"features"`
+ OS string `json:"os"`
+ OSFeatures []string `json:"os_feature"`
+ OSVersion string `json:"os_version"`
+ Variant string `json:"variant"`
+}
+
// InspectManifest returns a dockerized version of the manifest list
func (i *Image) InspectManifest() (*manifest.Schema2List, error) {
list, err := i.getManifestList()
@@ -158,3 +170,47 @@ func (i *Image) PushManifest(dest types.ImageReference, opts manifests.PushOptio
_, d, err := list.Push(context.Background(), dest, opts)
return d, err
}
+
+// AnnotateManifest updates an image configuration of a manifest list.
+func (i *Image) AnnotateManifest(systemContext types.SystemContext, d digest.Digest, opts ManifestAnnotateOpts) (string, error) {
+ list, err := i.getManifestList()
+ if err != nil {
+ return "", err
+ }
+ if len(opts.OS) > 0 {
+ if err := list.SetOS(d, opts.OS); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.OSVersion) > 0 {
+ if err := list.SetOSVersion(d, opts.OSVersion); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.Features) > 0 {
+ if err := list.SetFeatures(d, opts.Features); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.OSFeatures) > 0 {
+ if err := list.SetOSFeatures(d, opts.OSFeatures); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.Arch) > 0 {
+ if err := list.SetArchitecture(d, opts.Arch); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.Variant) > 0 {
+ if err := list.SetVariant(d, opts.Variant); err != nil {
+ return "", err
+ }
+ }
+ if len(opts.Annotation) > 0 {
+ if err := list.SetAnnotations(&d, opts.Annotation); err != nil {
+ return "", err
+ }
+ }
+ return list.SaveToImage(i.imageruntime.store, i.ID(), nil, "")
+}
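
AnnotateManifest applies per-instance overrides to a manifest list and saves the result back to the image store. A hedged usage sketch, assuming containers/image v5 for types.SystemContext and opencontainers/go-digest for the instance digest (those import paths are not shown in this diff):

    package manifestexample

    import (
            "github.com/containers/image/v5/types"
            "github.com/containers/libpod/libpod/image"
            digest "github.com/opencontainers/go-digest"
    )

    // annotateArch sets the architecture recorded for one manifest-list entry.
    func annotateArch(img *image.Image, instance digest.Digest, arch string) (string, error) {
            opts := image.ManifestAnnotateOpts{Arch: arch}
            return img.AnnotateManifest(types.SystemContext{}, instance, opts)
    }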
diff --git a/libpod/info.go b/libpod/info.go
index d7ed5bb16..51208a2b1 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/linkmode"
"github.com/containers/libpod/pkg/cgroups"
registries2 "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
@@ -86,6 +87,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
info := define.HostInfo{
Arch: runtime.GOARCH,
BuildahVersion: buildah.Version,
+ Linkmode: linkmode.Linkmode(),
CPUs: runtime.NumCPU(),
Distribution: hostDistributionInfo,
EventLogger: r.eventer.String(),
@@ -198,9 +200,15 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
if err != nil {
return cs, err
}
+ cs.Number = len(cons)
for _, con := range cons {
state, err := con.State()
if err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr {
+ // container was probably removed
+ cs.Number--
+ continue
+ }
return cs, err
}
switch state {
@@ -212,7 +220,6 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
stopped += 1
}
}
- cs.Number = len(cons)
cs.Paused = paused
cs.Stopped = stopped
cs.Running = running
diff --git a/libpod/kube.go b/libpod/kube.go
index 5511d303d..a3c5e912f 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -469,7 +469,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
}
var selinuxOpts v1.SELinuxOptions
- opts := strings.SplitN(c.config.Spec.Annotations[InspectAnnotationLabel], ":", 2)
+ opts := strings.SplitN(c.config.Spec.Annotations[define.InspectAnnotationLabel], ":", 2)
if len(opts) == 2 {
switch opts[0] {
case "type":
diff --git a/libpod/linkmode/linkmode_dynamic.go b/libpod/linkmode/linkmode_dynamic.go
new file mode 100644
index 000000000..6d51d60e0
--- /dev/null
+++ b/libpod/linkmode/linkmode_dynamic.go
@@ -0,0 +1,8 @@
+// +build !static
+
+package linkmode
+
+// Linkmode returns the linking mode (static/dynamic) for the build.
+func Linkmode() string {
+ return "dynamic"
+}
diff --git a/libpod/linkmode/linkmode_static.go b/libpod/linkmode/linkmode_static.go
new file mode 100644
index 000000000..2db083f4a
--- /dev/null
+++ b/libpod/linkmode/linkmode_static.go
@@ -0,0 +1,8 @@
+// +build static
+
+package linkmode
+
+// Linkmode returns the linking mode (static/dynamic) for the build.
+func Linkmode() string {
+ return "static"
+}
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 41a150c59..362821c62 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -76,6 +76,7 @@ func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
// Odd number, not a power of 2, should never be a word size on a system
lock, err := CreateSHMLock("/test1", 7)
assert.NoError(t, err)
+ assert.NotNil(t, lock)
assert.Equal(t, lock.GetMaxLocks(), BitmapSize)
diff --git a/libpod/oci.go b/libpod/oci.go
index 9991c5625..7c5218319 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -61,17 +61,30 @@ type OCIRuntime interface {
// the attach session to be terminated if provided via the STDIN
// channel. If they are not provided, the default detach keys will be
// used instead. Detach keys of "" will disable detaching via keyboard.
- // The streams parameter may be passed for containers that did not
- // create a terminal and will determine which streams to forward to the
+ // The streams parameter will determine which streams to forward to the
// client.
HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error
// AttachResize resizes the terminal in use by the given container.
AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
// ExecContainer executes a command in a running container.
- // Returns an int (exit code), error channel (errors from attach), and
- // error (errors that occurred attempting to start the exec session).
- ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error)
+ // Returns an int (PID of exec session), error channel (errors from
+ // attach), and error (errors that occurred attempting to start the exec
+ // session). This returns once the exec session is running - not once it
+ // has completed, as one might expect. The attach session will remain
+ // running, in a goroutine that will return via the chan error in the
+ // return signature.
+ ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error)
+ // ExecContainerHTTP executes a command in a running container and
+ // attaches its standard streams to a provided hijacked HTTP session.
+ // Maintains the same invariants as ExecContainer (returns on session
+ // start, with a goroutine running in the background to handle attach).
+ // The HTTP attach itself maintains the same invariants as HTTPAttach.
+ ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error)
+ // ExecContainerDetached executes a command in a running container, but
+ // does not attach to it. Returns the PID of the exec session and an
+ // error (if starting the exec session failed)
+ ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error
@@ -156,6 +169,9 @@ type ExecOptions struct {
// If provided but set to "", detaching from the container will be
// disabled.
DetachKeys *string
+ // ExitCommand is a command that will be run after the exec session
+ // exits.
+ ExitCommand []string
}
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's
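
Since ExecContainer now returns as soon as the session is running, a caller has to drain the returned error channel to learn how the attach (and thus the session) ended. A hedged caller sketch, assuming an OCIRuntime r, Container c, session ID, options, and streams already in hand (error handling trimmed):

pid, attachChan, err := r.ExecContainer(c, sessionID, options, streams)
if err != nil {
	return err
}
logrus.Debugf("Exec session %s running with PID %d", sessionID, pid)
// Block until the attach goroutine reports back (session exit or detach).
if err := <-attachChan; err != nil {
	return err
}
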
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
new file mode 100644
index 000000000..51819f90a
--- /dev/null
+++ b/libpod/oci_conmon_exec_linux.go
@@ -0,0 +1,599 @@
+package libpod
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/errorhandling"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/utils"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+// ExecContainer executes a command in a running container
+func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
+ if options == nil {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ }
+ if len(options.Cmd) == 0 {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ }
+
+ if sessionID == "" {
+ return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ }
+
+ // TODO: Should we default this to false?
+ // Or maybe make streams mandatory?
+ attachStdin := true
+ if streams != nil {
+ attachStdin = streams.AttachInput
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = c.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // Only close sync pipe. Start and attach are consumed in the attach
+ // goroutine.
+ defer func() {
+ if pipes.syncPipe != nil && !pipes.syncClosed {
+ errorhandling.CloseQuiet(pipes.syncPipe)
+ pipes.syncClosed = true
+ }
+ }()
+
+ // TODO Only create if !detach
+ // Attach to the container before starting it
+ attachChan := make(chan error)
+ go func() {
+ // attachToExec is responsible for closing pipes
+ attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe)
+ close(attachChan)
+ }()
+
+ if err := execCmd.Wait(); err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, attachChan, err
+}
+
+// ExecContainerHTTP executes a new command in an existing container and
+// forwards its standard streams over an attach
+func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+ if streams != nil {
+ if !streams.Stdin && !streams.Stdout && !streams.Stderr {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ }
+ }
+
+ if options == nil {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ }
+
+ detachString := config.DefaultDetachKeys
+ if options.DetachKeys != nil {
+ detachString = *options.DetachKeys
+ }
+ detachKeys, err := processDetachKeys(detachString)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // TODO: Should we default this to false?
+ // Or maybe make streams mandatory?
+ attachStdin := true
+ if streams != nil {
+ attachStdin = streams.Stdin
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = ctr.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog)
+ if err != nil {
+ return -1, nil, err
+ }
+
+ // Only close sync pipe. Start and attach are consumed in the attach
+ // goroutine.
+ defer func() {
+ if pipes.syncPipe != nil && !pipes.syncClosed {
+ errorhandling.CloseQuiet(pipes.syncPipe)
+ pipes.syncClosed = true
+ }
+ }()
+
+ attachChan := make(chan error)
+ go func() {
+ // attachToExec is responsible for closing pipes
+ attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel)
+ close(attachChan)
+ }()
+
+ // Wait for conmon to exit and return an error if it failed.
+ if err := execCmd.Wait(); err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, attachChan, err
+}
+
+// ExecContainerDetached executes a command in a running container, but does
+// not attach to it.
+func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
+ if options == nil {
+ return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerDetached")
+ }
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = ctr.execOCILog(sessionID)
+ }
+
+ execCmd, pipes, err := r.startExec(ctr, sessionID, options, stdin, ociLog)
+ if err != nil {
+ return -1, err
+ }
+
+ defer func() {
+ pipes.cleanup()
+ }()
+
+ // Wait for Conmon to tell us we're ready to attach.
+ // We aren't actually *going* to attach, but this means that we're good
+ // to proceed.
+ if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
+ return -1, err
+ }
+
+ // Start the exec session
+ if err := writeConmonPipeData(pipes.startPipe); err != nil {
+ return -1, err
+ }
+
+ // Wait for conmon to exit and return an error if it failed.
+ if err := execCmd.Wait(); err != nil {
+ return -1, errors.Wrapf(err, "cannot run conmon")
+ }
+
+ pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
+
+ return pid, err
+}
+
+// ExecAttachResize resizes the TTY of the given exec session.
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
+ controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
+ if err != nil {
+ return err
+ }
+ defer controlFile.Close()
+
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
+ return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ }
+
+ return nil
+}
+
+// ExecStopContainer stops a given exec session in a running container.
+func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
+
+ // Is the session dead?
+ // Ping the PID with signal 0 to see if it still exists.
+ if err := unix.Kill(pid, 0); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ }
+
+ if timeout > 0 {
+ // Use SIGTERM by default, then SIGKILL after the timeout.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGTERM); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
+ }
+
+ // Wait for the PID to stop
+ if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
+ logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
+ } else {
+ // No error, the exec session is dead
+ return nil
+ }
+ }
+
+ // SIGTERM did not work. On to SIGKILL.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGKILL); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
+ }
+
+ // Wait for the PID to stop
+ if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
+ return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
+ }
+
+ return nil
+}
+
+// ExecUpdateStatus checks if the given exec session is still running.
+func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return false, err
+ }
+
+ logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
+
+ // Is the session dead?
+ // Ping the PID with signal 0 to see if it still exists.
+ if err := unix.Kill(pid, 0); err != nil {
+ if err == unix.ESRCH {
+ return false, nil
+ }
+ return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ }
+
+ return true, nil
+}
+
+// ExecContainerCleanup cleans up files created when a command is run via
+// ExecContainer. This includes the attach socket for the exec session.
+func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
+ // Clean up the sockets dir. Issue #3962
+ // Also ignore if it doesn't exist for some reason; hence the conditional return below
+ if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+// ExecAttachSocketPath is the path to a container's exec session attach socket.
+func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
+ // We don't use the container at all, so we don't validate it
+ if sessionID == "" {
+ return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
+ }
+
+ return filepath.Join(r.socketsDir, sessionID, "attach"), nil
+}
+
+// This contains pipes used by the exec API.
+type execPipes struct {
+ syncPipe *os.File
+ syncClosed bool
+ startPipe *os.File
+ startClosed bool
+ attachPipe *os.File
+ attachClosed bool
+}
+
+func (p *execPipes) cleanup() {
+ if p.syncPipe != nil && !p.syncClosed {
+ errorhandling.CloseQuiet(p.syncPipe)
+ p.syncClosed = true
+ }
+ if p.startPipe != nil && !p.startClosed {
+ errorhandling.CloseQuiet(p.startPipe)
+ p.startClosed = true
+ }
+ if p.attachPipe != nil && !p.attachClosed {
+ errorhandling.CloseQuiet(p.attachPipe)
+ p.attachClosed = true
+ }
+}
+
+// Start an exec session's conmon parent from the given options.
+func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) {
+ pipes := new(execPipes)
+
+ if options == nil {
+ return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ }
+ if len(options.Cmd) == 0 {
+ return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ }
+
+ if sessionID == "" {
+ return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ }
+
+ // create sync pipe to receive the pid
+ parentSyncPipe, childSyncPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.syncPipe = parentSyncPipe
+
+ defer func() {
+ if deferredErr != nil {
+ pipes.cleanup()
+ }
+ }()
+
+ // create start pipe to set the cgroup before running
+ // attachToExec is responsible for closing parentStartPipe
+ childStartPipe, parentStartPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.startPipe = parentStartPipe
+
+ // Create the attach pipe so the attach socket exists before $RUNTIME exec
+ // starts running. This ensures we capture all of the process's output
+ // through that socket, rather than reading half from the log and half from the socket.
+ // attachToExec is responsible for closing parentAttachPipe
+ parentAttachPipe, childAttachPipe, err := newPipe()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ }
+ pipes.attachPipe = parentAttachPipe
+
+ childrenClosed := false
+ defer func() {
+ if !childrenClosed {
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ }
+ }()
+
+ runtimeDir, err := util.GetRuntimeDir()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ finalEnv := make([]string, 0, len(options.Env))
+ for k, v := range options.Env {
+ finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
+
+ if options.PreserveFDs > 0 {
+ args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
+ }
+
+ for _, capability := range options.CapAdd {
+ args = append(args, formatRuntimeOpts("--cap", capability)...)
+ }
+
+ if options.Terminal {
+ args = append(args, "-t")
+ }
+
+ if attachStdin {
+ args = append(args, "-i")
+ }
+
+ // Append exec flags and the exec process spec path
+ args = append(args, "-e")
+ // TODO make this optional when we can detach
+ args = append(args, "--exec-attach")
+ args = append(args, "--exec-process-spec", processFile.Name())
+
+ if len(options.ExitCommand) > 0 {
+ args = append(args, "--exit-command", options.ExitCommand[0])
+ for _, arg := range options.ExitCommand[1:] {
+ args = append(args, []string{"--exit-command-arg", arg}...)
+ }
+ }
+
+ logrus.WithFields(logrus.Fields{
+ "args": args,
+ }).Debugf("running conmon: %s", r.conmonPath)
+ // TODO: Need to pass this back so we can wait on it.
+ execCmd := exec.Command(r.conmonPath, args...)
+
+ // TODO: This is commented because it doesn't make much sense in HTTP
+ // attach, and I'm not certain it does for non-HTTP attach as well.
+ // if streams != nil {
+ // // Don't add the InputStream to the execCmd. Instead, the data should be passed
+ // // through CopyDetachable
+ // if streams.AttachOutput {
+ // execCmd.Stdout = options.Streams.OutputStream
+ // }
+ // if streams.AttachError {
+ // execCmd.Stderr = options.Streams.ErrorStream
+ // }
+ // }
+
+ conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if options.PreserveFDs > 0 {
+ for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
+ }
+ }
+
+ // We don't want to step on the fds the user asked us to preserve.
+ // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
+ execCmd.Env = r.conmonEnv
+ execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
+ execCmd.Env = append(execCmd.Env, conmonEnv...)
+
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
+ execCmd.Dir = c.execBundlePath(sessionID)
+ execCmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+
+ err = startCommandGivenSelinux(execCmd)
+
+ // We don't need children pipes on the parent side
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ childrenClosed = true
+
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
+ }
+ if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
+ return nil, nil, err
+ }
+
+ if options.PreserveFDs > 0 {
+ for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
+ // These fds were passed down to the runtime. Close them
+ // so we do not interfere with it.
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
+ }
+ }
+ }
+
+ return execCmd, pipes, nil
+}
+
+// Attach to a container over HTTP
+func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error {
+ if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
+ }
+
+ defer func() {
+ if !pipes.startClosed {
+ errorhandling.CloseQuiet(pipes.startPipe)
+ pipes.startClosed = true
+ }
+ if !pipes.attachClosed {
+ errorhandling.CloseQuiet(pipes.attachPipe)
+ pipes.attachClosed = true
+ }
+ }()
+
+ logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID)
+
+ // set up the socket path, such that it is the correct length and location for exec
+ sockPath, err := c.execAttachSocketPath(sessionID)
+ if err != nil {
+ return err
+ }
+ socketPath := buildSocketPath(sockPath)
+
+ // Wait on the attach pipe for conmon to signal that the console socket is set up
+ if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
+ return err
+ }
+
+ // Then attach to the exec session's socket
+ conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close socket: %q", err)
+ }
+ }()
+
+ // Make a channel to pass errors back
+ errChan := make(chan error)
+
+ attachStdout := true
+ attachStderr := true
+ attachStdin := true
+ if streams != nil {
+ attachStdout = streams.Stdout
+ attachStderr = streams.Stderr
+ attachStdin = streams.Stdin
+ }
+
+ // Next, STDIN. Avoid entirely if attachStdin unset.
+ if attachStdin {
+ go func() {
+ logrus.Debugf("Beginning STDIN copy")
+ _, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
+ logrus.Debugf("STDIN copy completed")
+ errChan <- err
+ }()
+ }
+
+ // Send the start message to conmon
+ if err := writeConmonPipeData(pipes.startPipe); err != nil {
+ return err
+ }
+
+ // Handle STDOUT/STDERR *after* start message is sent
+ go func() {
+ var err error
+ if isTerminal {
+ // Hack: to emulate Docker, only copy if attachStdout is set.
+ // When a terminal is in use, STDERR goes nowhere: everything
+ // goes over STDOUT. So if we are not attaching STDOUT, there
+ // is nothing to copy from here.
+ logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID())
+ if attachStdout {
+ err = httpAttachTerminalCopy(conn, httpBuf, c.ID())
+ }
+ } else {
+ logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID())
+ err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr)
+ }
+ errChan <- err
+ logrus.Debugf("STDOUT/ERR copy completed")
+ }()
+
+ if cancel != nil {
+ select {
+ case err := <-errChan:
+ return err
+ case <-cancel:
+ return nil
+ }
+ }
+ return <-errChan
+}
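
One detail worth calling out from startExec above is how ExecOptions.ExitCommand is translated into conmon flags: the first element becomes the --exit-command value and every remaining element becomes its own --exit-command-arg. A standalone sketch of that mapping:

package main

import "fmt"

// exitCommandArgs mirrors the flag construction in startExec above.
func exitCommandArgs(exitCommand []string) []string {
	args := []string{}
	if len(exitCommand) == 0 {
		return args
	}
	args = append(args, "--exit-command", exitCommand[0])
	for _, arg := range exitCommand[1:] {
		args = append(args, "--exit-command-arg", arg)
	}
	return args
}

func main() {
	fmt.Println(exitCommandArgs([]string{"/usr/bin/podman", "container", "cleanup"}))
	// Output: [--exit-command /usr/bin/podman --exit-command-arg container --exit-command-arg cleanup]
}
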
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index da4b85067..9c92b036e 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -635,297 +635,6 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.Te
return nil
}
-// ExecContainer executes a command in a running container
-// TODO: Split into Create/Start/Attach/Wait
-func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
- if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
- }
- if len(options.Cmd) == 0 {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
- }
-
- if sessionID == "" {
- return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
- }
-
- // create sync pipe to receive the pid
- parentSyncPipe, childSyncPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- defer errorhandling.CloseQuiet(parentSyncPipe)
-
- // create start pipe to set the cgroup before running
- // attachToExec is responsible for closing parentStartPipe
- childStartPipe, parentStartPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- // We want to make sure we close the parent{Start,Attach}Pipes if we fail
- // but also don't want to close them after attach to exec is called
- attachToExecCalled := false
-
- defer func() {
- if !attachToExecCalled {
- errorhandling.CloseQuiet(parentStartPipe)
- }
- }()
-
- // create the attach pipe to allow attach socket to be created before
- // $RUNTIME exec starts running. This is to make sure we can capture all output
- // from the process through that socket, rather than half reading the log, half attaching to the socket
- // attachToExec is responsible for closing parentAttachPipe
- parentAttachPipe, childAttachPipe, err := newPipe()
- if err != nil {
- return -1, nil, errors.Wrapf(err, "error creating socket pair")
- }
-
- defer func() {
- if !attachToExecCalled {
- errorhandling.CloseQuiet(parentAttachPipe)
- }
- }()
-
- childrenClosed := false
- defer func() {
- if !childrenClosed {
- errorhandling.CloseQuiet(childSyncPipe)
- errorhandling.CloseQuiet(childAttachPipe)
- errorhandling.CloseQuiet(childStartPipe)
- }
- }()
-
- runtimeDir, err := util.GetRuntimeDir()
- if err != nil {
- return -1, nil, err
- }
-
- finalEnv := make([]string, 0, len(options.Env))
- for k, v := range options.Env {
- finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
- }
-
- processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
- if err != nil {
- return -1, nil, err
- }
-
- var ociLog string
- if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
- ociLog = c.execOCILog(sessionID)
- }
- args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
-
- if options.PreserveFDs > 0 {
- args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
- }
-
- for _, capability := range options.CapAdd {
- args = append(args, formatRuntimeOpts("--cap", capability)...)
- }
-
- if options.Terminal {
- args = append(args, "-t")
- }
-
- if options.Streams.AttachInput {
- args = append(args, "-i")
- }
-
- // Append container ID and command
- args = append(args, "-e")
- // TODO make this optional when we can detach
- args = append(args, "--exec-attach")
- args = append(args, "--exec-process-spec", processFile.Name())
-
- logrus.WithFields(logrus.Fields{
- "args": args,
- }).Debugf("running conmon: %s", r.conmonPath)
- execCmd := exec.Command(r.conmonPath, args...)
-
- if options.Streams != nil {
- // Don't add the InputStream to the execCmd. Instead, the data should be passed
- // through CopyDetachable
- if options.Streams.AttachOutput {
- execCmd.Stdout = options.Streams.OutputStream
- }
- if options.Streams.AttachError {
- execCmd.Stderr = options.Streams.ErrorStream
- }
- }
-
- conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
- if err != nil {
- return -1, nil, err
- }
-
- if options.PreserveFDs > 0 {
- for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
- }
- }
-
- // we don't want to step on users fds they asked to preserve
- // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
- execCmd.Env = r.conmonEnv
- execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
- execCmd.Env = append(execCmd.Env, conmonEnv...)
-
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
- execCmd.Dir = c.execBundlePath(sessionID)
- execCmd.SysProcAttr = &syscall.SysProcAttr{
- Setpgid: true,
- }
-
- err = startCommandGivenSelinux(execCmd)
-
- // We don't need children pipes on the parent side
- errorhandling.CloseQuiet(childSyncPipe)
- errorhandling.CloseQuiet(childAttachPipe)
- errorhandling.CloseQuiet(childStartPipe)
- childrenClosed = true
-
- if err != nil {
- return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
- }
- if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
- return -1, nil, err
- }
-
- if options.PreserveFDs > 0 {
- for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- // These fds were passed down to the runtime. Close them
- // and not interfere
- if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
- logrus.Debugf("unable to close file fd-%d", fd)
- }
- }
- }
-
- // TODO Only create if !detach
- // Attach to the container before starting it
- attachChan := make(chan error)
- go func() {
- // attachToExec is responsible for closing pipes
- attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe)
- close(attachChan)
- }()
- attachToExecCalled = true
-
- if err := execCmd.Wait(); err != nil {
- return -1, nil, errors.Wrapf(err, "cannot run conmon")
- }
-
- pid, err := readConmonPipeData(parentSyncPipe, ociLog)
-
- return pid, attachChan, err
-}
-
-// ExecAttachResize resizes the TTY of the given exec session.
-func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
- controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
- if err != nil {
- return err
- }
- defer controlFile.Close()
-
- if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
- }
-
- return nil
-}
-
-// ExecStopContainer stops a given exec session in a running container.
-func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
- pid, err := ctr.getExecSessionPID(sessionID)
- if err != nil {
- return err
- }
-
- logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
-
- // Is the session dead?
- // Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(pid, 0); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
- }
-
- if timeout > 0 {
- // Use SIGTERM by default, then SIGSTOP after timeout.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
- if err := unix.Kill(pid, unix.SIGTERM); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
- }
-
- // Wait for the PID to stop
- if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
- logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
- } else {
- // No error, container is dead
- return nil
- }
- }
-
- // SIGTERM did not work. On to SIGKILL.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
- if err := unix.Kill(pid, unix.SIGTERM); err != nil {
- if err == unix.ESRCH {
- return nil
- }
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
- }
-
- // Wait for the PID to stop
- if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
- return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
- }
-
- return nil
-}
-
-// ExecUpdateStatus checks if the given exec session is still running.
-func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
- pid, err := ctr.getExecSessionPID(sessionID)
- if err != nil {
- return false, err
- }
-
- logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
-
- // Is the session dead?
- // Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(pid, 0); err != nil {
- if err == unix.ESRCH {
- return false, nil
- }
- return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
- }
-
- return true, nil
-}
-
-// ExecContainerCleanup cleans up files created when a command is run via
-// ExecContainer. This includes the attach socket for the exec session.
-func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
- // Clean up the sockets dir. Issue #3962
- // Also ignore if it doesn't exist for some reason; hence the conditional return below
- if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
- return err
- }
- return nil
-}
-
// CheckpointContainer checkpoints the given container.
func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
@@ -1002,16 +711,6 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil
}
-// ExecAttachSocketPath is the path to a container's exec session attach socket.
-func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
- // We don't even use container, so don't validity check it
- if sessionID == "" {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
- }
-
- return filepath.Join(r.socketsDir, sessionID, "attach"), nil
-}
-
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
@@ -1415,15 +1114,21 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o
// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logTag string) []string {
// set the conmon API version to be able to use the correct sync struct keys
- args := []string{"--api-version", "1"}
+ args := []string{
+ "--api-version", "1",
+ "-c", ctr.ID(),
+ "-u", cuuid,
+ "-r", r.path,
+ "-b", bundlePath,
+ "-p", pidPath,
+ "-n", ctr.Name(),
+ "--exit-dir", exitDir,
+ "--socket-dir-path", r.socketsDir,
+ }
+
if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups {
args = append(args, "-s")
}
- args = append(args, "-c", ctr.ID())
- args = append(args, "-u", cuuid)
- args = append(args, "-r", r.path)
- args = append(args, "-b", bundlePath)
- args = append(args, "-p", pidPath)
var logDriver string
switch ctr.LogDriver() {
@@ -1444,8 +1149,6 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
}
args = append(args, "-l", logDriver)
- args = append(args, "--exit-dir", exitDir)
- args = append(args, "--socket-dir-path", r.socketsDir)
if r.logSizeMax >= 0 {
args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
}
@@ -1704,6 +1407,8 @@ func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid
buf := make([]byte, bufferSize)
for {
numR, err := container.Read(buf)
+ logrus.Debugf("Read fd(%d) %d/%d bytes for container %s", int(buf[0]), numR, len(buf), cid)
+
if numR > 0 {
switch buf[0] {
case AttachPipeStdout:
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 172805b0d..4da16876c 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -121,10 +121,20 @@ func (r *MissingRuntime) AttachResize(ctr *Container, newSize remotecommand.Term
}
// ExecContainer is not available as the runtime is missing
-func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) {
+func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
return -1, nil, r.printError()
}
+// ExecContainerHTTP is not available as the runtime is missing
+func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+ return -1, nil, r.printError()
+}
+
+// ExecContainerDetached is not available as the runtime is missing
+func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
+ return -1, r.printError()
+}
+
// ExecAttachResize is not available as the runtime is missing.
func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
return r.printError()
diff --git a/libpod/options.go b/libpod/options.go
index 33b423bce..8e0d3df86 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -4,11 +4,11 @@ import (
"net"
"os"
"path/filepath"
- "regexp"
"syscall"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/namespaces"
@@ -18,15 +18,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
-)
-
-var (
- // NameRegex is a regular expression to validate container/pod names.
- // This must NOT be changed from outside of Libpod. It should be a
- // constant, but Go won't let us do that.
- NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
- // RegexError is thrown in presence of an invalid container/pod name.
- RegexError = errors.Wrapf(define.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
+ "github.com/sirupsen/logrus"
)
// Runtime Creation Options
@@ -254,6 +246,22 @@ func WithStaticDir(dir string) RuntimeOption {
}
}
+// WithRegistriesConf configures the runtime to always use the specified
+// registries.conf file for image processing.
+func WithRegistriesConf(path string) RuntimeOption {
+ logrus.Debugf("Setting custom registries.conf: %q", path)
+ return func(rt *Runtime) error {
+ if _, err := os.Stat(path); err != nil {
+ return errors.Wrap(err, "error locating specified registries.conf")
+ }
+ if rt.imageContext == nil {
+ rt.imageContext = &types.SystemContext{}
+ }
+ rt.imageContext.SystemRegistriesConfPath = path
+ return nil
+ }
+}
+
// WithHooksDir sets the directories to look for OCI runtime hook configuration.
func WithHooksDir(hooksDirs ...string) RuntimeOption {
return func(rt *Runtime) error {
@@ -665,8 +673,8 @@ func WithName(name string) CtrCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
ctr.config.Name = name
@@ -1383,8 +1391,8 @@ func WithVolumeName(name string) VolumeCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
volume.config.Name = name
@@ -1502,8 +1510,8 @@ func WithPodName(name string) PodCreateOption {
}
// Check the name against a regex
- if !NameRegex.MatchString(name) {
- return RegexError
+ if !define.NameRegex.MatchString(name) {
+ return define.RegexError
}
pod.config.Name = name
@@ -1520,8 +1528,8 @@ func WithPodHostname(hostname string) PodCreateOption {
}
// Check the hostname against a regex
- if !NameRegex.MatchString(hostname) {
- return RegexError
+ if !define.NameRegex.MatchString(hostname) {
+ return define.RegexError
}
pod.config.Hostname = hostname
@@ -1692,6 +1700,22 @@ func WithPodUTS() PodCreateOption {
}
}
+// WithPodCgroup tells containers in this pod to use the cgroup namespace
+// created for this pod.
+// Containers in a pod will inherit the kernel namespaces from the first
+// container added.
+func WithPodCgroup() PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+
+ pod.config.UsePodCgroupNS = true
+
+ return nil
+ }
+}
+
// WithInfraContainer tells the pod to create a pause container
func WithInfraContainer() PodCreateOption {
return func(pod *Pod) error {
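
A hedged usage sketch for the new WithRegistriesConf option, assuming libpod's existing NewRuntime(ctx, options...) constructor and Shutdown method; the path is only an example:

rt, err := libpod.NewRuntime(ctx,
	libpod.WithRegistriesConf("/etc/containers/registries.conf"),
)
if err != nil {
	return err
}
defer func() {
	if err := rt.Shutdown(false); err != nil {
		logrus.Errorf("Error shutting down runtime: %v", err)
	}
}()
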
diff --git a/libpod/pod.go b/libpod/pod.go
index b5a14c165..34ceef5ef 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -51,12 +51,13 @@ type PodConfig struct {
// The following UsePod{kernelNamespace} indicate whether the containers
// in the pod will inherit the namespace from the first container in the pod.
- UsePodPID bool `json:"sharesPid,omitempty"`
- UsePodIPC bool `json:"sharesIpc,omitempty"`
- UsePodNet bool `json:"sharesNet,omitempty"`
- UsePodMount bool `json:"sharesMnt,omitempty"`
- UsePodUser bool `json:"sharesUser,omitempty"`
- UsePodUTS bool `json:"sharesUts,omitempty"`
+ UsePodPID bool `json:"sharesPid,omitempty"`
+ UsePodIPC bool `json:"sharesIpc,omitempty"`
+ UsePodNet bool `json:"sharesNet,omitempty"`
+ UsePodMount bool `json:"sharesMnt,omitempty"`
+ UsePodUser bool `json:"sharesUser,omitempty"`
+ UsePodUTS bool `json:"sharesUts,omitempty"`
+ UsePodCgroupNS bool `json:"sharesCgroupNS,omitempty"`
InfraContainer *InfraContainerConfig `json:"infraConfig"`
@@ -167,7 +168,7 @@ func (p *Pod) SharesUTS() bool {
// SharesCgroup returns whether containers in the pod will default to this pod's
// cgroup instead of the default libpod parent
func (p *Pod) SharesCgroup() bool {
- return p.config.UsePodCgroup
+ return p.config.UsePodCgroupNS
}
// CgroupPath returns the path to the pod's CGroup
@@ -247,14 +248,14 @@ func (p *Pod) InfraContainerID() (string, error) {
// PodContainerStats is an organization struct for pods and their containers
type PodContainerStats struct {
Pod *Pod
- ContainerStats map[string]*ContainerStats
+ ContainerStats map[string]*define.ContainerStats
}
// GetPodStats returns the stats for each of its containers
-func (p *Pod) GetPodStats(previousContainerStats map[string]*ContainerStats) (map[string]*ContainerStats, error) {
+func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerStats) (map[string]*define.ContainerStats, error) {
var (
ok bool
- prevStat *ContainerStats
+ prevStat *define.ContainerStats
)
p.lock.Lock()
defer p.lock.Unlock()
@@ -266,10 +267,10 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*ContainerStats) (ma
if err != nil {
return nil, err
}
- newContainerStats := make(map[string]*ContainerStats)
+ newContainerStats := make(map[string]*define.ContainerStats)
for _, c := range containers {
if prevStat, ok = previousContainerStats[c.ID()]; !ok {
- prevStat = &ContainerStats{}
+ prevStat = &define.ContainerStats{}
}
newStats, err := c.GetContainerStats(prevStat)
// If the container wasn't running, don't include it
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 45aa5cb8d..0be9f2573 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -466,6 +466,24 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
if err != nil {
return nil, err
}
+
+ namespaces := map[string]bool{
+ "pid": p.config.UsePodPID,
+ "ipc": p.config.UsePodIPC,
+ "net": p.config.UsePodNet,
+ "mount": p.config.UsePodMount,
+ "user": p.config.UsePodUser,
+ "uts": p.config.UsePodUTS,
+ "cgroup": p.config.UsePodCgroupNS,
+ }
+
+ sharesNS := []string{}
+ for nsStr, include := range namespaces {
+ if include {
+ sharesNS = append(sharesNS, nsStr)
+ }
+ }
+
inspectData := define.InspectPodData{
ID: p.ID(),
Name: p.Name(),
@@ -480,7 +498,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
CreateInfra: false,
InfraContainerID: p.state.InfraContainerID,
InfraConfig: nil,
- SharedNamespaces: nil,
+ SharedNamespaces: sharesNS,
NumContainers: uint(len(containers)),
Containers: ctrs,
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index e71483ef9..4744de1a2 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -339,9 +339,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
// Set up containers/image
- runtime.imageContext = &types.SystemContext{
- SignaturePolicyPath: runtime.config.Engine.SignaturePolicyPath,
+ if runtime.imageContext == nil {
+ runtime.imageContext = &types.SystemContext{}
}
+ runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
@@ -752,7 +753,7 @@ type DBConfig struct {
// mergeDBConfig merges the configuration from the database.
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error {
- c := r.config.Engine
+ c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
r.storageConfig.RunRoot != "" {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 3dc8d3d0f..655b42e51 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -390,6 +390,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ logrus.Debugf("Removing container %s", c.ID())
+
// We need to lock the pod before we lock the container.
// To avoid races around removing a container and the pod it is in.
// Don't need to do this in pod removal case - we're evicting the entire
@@ -488,20 +490,25 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ var cleanupErr error
+
+ // Clean up network namespace, cgroups, mounts.
+ // Do this before we set ContainerStateRemoving, to ensure that we can
+ // actually remove from the OCI runtime.
+ if err := c.cleanup(ctx); err != nil {
+ cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ }
+
// Set ContainerStateRemoving
c.state.State = define.ContainerStateRemoving
if err := c.save(); err != nil {
+ if cleanupErr != nil {
+ logrus.Errorf(err.Error())
+ }
return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
}
- var cleanupErr error
-
- // Clean up network namespace, cgroups, mounts
- if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
- }
-
// Stop the container's storage
if err := c.teardownStorage(); err != nil {
if cleanupErr == nil {
@@ -869,7 +876,8 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int
logrus.Error(err)
return false
}
- if state == define.ContainerStateStopped || state == define.ContainerStateExited {
+ if state == define.ContainerStateStopped || state == define.ContainerStateExited ||
+ state == define.ContainerStateCreated || state == define.ContainerStateConfigured {
return true
}
return false
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 919080c42..cd7f54799 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -56,7 +56,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
}
}
} else {
- return nil, fmt.Errorf("could not remove image %s as it is being used by %d containers", img.ID(), len(imageCtrs))
+ return nil, errors.Wrapf(define.ErrImageInUse, "could not remove image %s as it is being used by %d containers", img.ID(), len(imageCtrs))
}
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 872e8ea8a..73b6c5d9b 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -236,7 +236,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
if err := r.removeContainer(ctx, ctr, force, false, true); err != nil {
- if removalErr != nil {
+ if removalErr == nil {
removalErr = err
} else {
logrus.Errorf("Error removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
diff --git a/libpod/stats.go b/libpod/stats.go
index 6f42afd18..9f4986144 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -13,8 +13,8 @@ import (
)
// GetContainerStats gets the running stats for a given container
-func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {
- stats := new(ContainerStats)
+func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*define.ContainerStats, error) {
+ stats := new(define.ContainerStats)
stats.ContainerID = c.ID()
stats.Name = c.Name()
diff --git a/libpod/stats_config.go b/libpod/stats_config.go
deleted file mode 100644
index 91d3d1493..000000000
--- a/libpod/stats_config.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package libpod
-
-// ContainerStats contains the statistics information for a running container
-type ContainerStats struct {
- ContainerID string
- Name string
- PerCPU []uint64
- CPU float64
- CPUNano uint64
- CPUSystemNano uint64
- SystemNano uint64
- MemUsage uint64
- MemLimit uint64
- MemPerc float64
- NetInput uint64
- NetOutput uint64
- BlockInput uint64
- BlockOutput uint64
- PIDs uint64
-}
diff --git a/libpod/stats_unsupported.go b/libpod/stats_unsupported.go
index ec19a89a1..6d21ae8f2 100644
--- a/libpod/stats_unsupported.go
+++ b/libpod/stats_unsupported.go
@@ -5,6 +5,6 @@ package libpod
import "github.com/containers/libpod/libpod/define"
// GetContainerStats gets the running stats for a given container
-func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {
+func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*define.ContainerStats, error) {
return nil, define.ErrOSNotSupported
}
diff --git a/libpod/util.go b/libpod/util.go
index 6457dac1c..ba9f1fa05 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,12 +9,10 @@ import (
"os/exec"
"path/filepath"
"sort"
- "strconv"
"strings"
"time"
"github.com/containers/common/pkg/config"
-
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/utils"
"github.com/fsnotify/fsnotify"
@@ -36,24 +34,6 @@ func FuncTimer(funcName string) {
fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
}
-// RemoveScientificNotationFromFloat returns a float without any
-// scientific notation if the number has any.
-// golang does not handle conversion of float64s that have scientific
-// notation in them and otherwise stinks. please replace this if you have
-// a better implementation.
-func RemoveScientificNotationFromFloat(x float64) (float64, error) {
- bigNum := strconv.FormatFloat(x, 'g', -1, 64)
- breakPoint := strings.IndexAny(bigNum, "Ee")
- if breakPoint > 0 {
- bigNum = bigNum[:breakPoint]
- }
- result, err := strconv.ParseFloat(bigNum, 64)
- if err != nil {
- return x, errors.Wrapf(err, "unable to remove scientific number from calculations")
- }
- return result, nil
-}
-
// MountExists returns true if dest exists in the list of mounts
func MountExists(specMounts []spec.Mount, dest string) bool {
for _, m := range specMounts {
@@ -269,9 +249,8 @@ func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon
// length and stream. Accepts an integer indicating which stream we are sending
// to (STDIN = 0, STDOUT = 1, STDERR = 2).
func makeHTTPAttachHeader(stream byte, length uint32) []byte {
- headerBuf := []byte{stream, 0, 0, 0}
- lenBuf := []byte{0, 0, 0, 0}
- binary.BigEndian.PutUint32(lenBuf, length)
- headerBuf = append(headerBuf, lenBuf...)
- return headerBuf
+ header := make([]byte, 8)
+ header[0] = stream
+ binary.BigEndian.PutUint32(header[4:], length)
+ return header
}
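
The rewrite above makes the 8-byte frame header layout explicit: byte 0 carries the stream (STDIN=0, STDOUT=1, STDERR=2), bytes 1-3 stay zero, and bytes 4-7 hold the payload length in big-endian order. A standalone demonstration of the same framing:

package main

import (
	"encoding/binary"
	"fmt"
)

// Local copy of the header construction from util.go above.
func makeHTTPAttachHeader(stream byte, length uint32) []byte {
	header := make([]byte, 8)
	header[0] = stream
	binary.BigEndian.PutUint32(header[4:], length)
	return header
}

func main() {
	fmt.Println(makeHTTPAttachHeader(1, 1024)) // [1 0 0 0 0 0 4 0]
}
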
diff --git a/libpod/util_test.go b/libpod/util_test.go
index 227686c2b..4e18a7e4e 100644
--- a/libpod/util_test.go
+++ b/libpod/util_test.go
@@ -3,6 +3,7 @@ package libpod
import (
"testing"
+ "github.com/containers/libpod/utils"
"github.com/stretchr/testify/assert"
)
@@ -10,7 +11,7 @@ func TestRemoveScientificNotationFromFloat(t *testing.T) {
numbers := []float64{0.0, .5, 1.99999932, 1.04e+10}
results := []float64{0.0, .5, 1.99999932, 1.04}
for i, x := range numbers {
- result, err := RemoveScientificNotationFromFloat(x)
+ result, err := utils.RemoveScientificNotationFromFloat(x)
assert.NoError(t, err)
assert.Equal(t, result, results[i])
}
diff --git a/libpod/volume.go b/libpod/volume.go
index 70099d6f4..82f389833 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -3,6 +3,7 @@ package libpod
import (
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
)
@@ -133,3 +134,15 @@ func (v *Volume) Config() (*VolumeConfig, error) {
err := JSONDeepCopy(v.config, &config)
return &config, err
}
+
+// VolumesInUse goes through the container dependencies of a volume
+// and returns the IDs of the containers using it.
+func (v *Volume) VolumesInUse() ([]string, error) {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ if !v.valid {
+ return nil, define.ErrVolumeRemoved
+ }
+ return v.runtime.state.VolumeInUse(v)
+}
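
A short usage sketch for the new method, assuming an existing *libpod.Volume and using github.com/pkg/errors as elsewhere in this diff:

ctrs, err := vol.VolumesInUse()
if err != nil {
	return err
}
if len(ctrs) > 0 {
	return errors.Errorf("volume %s is in use by %d container(s)", vol.Name(), len(ctrs))
}
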