author     Matthew Heon <matthew.heon@pm.me>  2019-10-15 15:11:26 -0400
committer  Matthew Heon <matthew.heon@pm.me>  2019-10-15 15:59:20 -0400
commit     cab7bfbb211f2496af9f86208588e26954fc9b2a (patch)
tree       d65c78bbac71bba5dc7a1b95b830a9d032243f87 /libpod/container_internal.go
parent     5f72e6ef2ef7b6941cefc3c655903aeacb53e115 (diff)
Add a MissingRuntime implementation
When a container is created with a given OCI runtime, but then it is
uninstalled or removed from the configuration file, Libpod presently reacts
very poorly. The EvictContainer code can potentially remove these containers,
but we still can't see them in `podman ps` (aside from the massive
logrus.Errorf messages they create).

Providing a minimal OCI runtime implementation for missing runtimes allows us
to behave better. We'll be able to retrieve containers from the database,
though we still pop up an error for each missing runtime. For containers which
are stopped, we can remove them as normal.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
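For context, here is a rough sketch of the idea behind such a fallback runtime. It is illustrative only: the type name mirrors the commit title, but the method set, the error wording, and the `errMissingRuntime` variable are assumptions made for this sketch, not Libpod's actual OCIRuntime interface (which is considerably larger).

```go
package main

import (
	"errors"
	"fmt"
)

// errMissingRuntime stands in for the error Libpod would return; the name
// and wording are assumptions for this sketch.
var errMissingRuntime = errors.New("OCI runtime is missing from the configuration")

// MissingRuntime is a stand-in OCI runtime used when the runtime a container
// was created with can no longer be found. Operations that would need the
// real runtime binary fail with a descriptive error; operations that only
// touch Libpod's own state can still proceed.
type MissingRuntime struct {
	name string // name of the runtime that could not be found
}

// StartContainer cannot work without the real runtime binary.
func (r *MissingRuntime) StartContainer(id string) error {
	return fmt.Errorf("cannot start container %s (runtime %s): %w", id, r.name, errMissingRuntime)
}

// DeleteContainer also fails at the runtime level, but callers that know the
// container is already stopped can still remove its record from the database.
func (r *MissingRuntime) DeleteContainer(id string) error {
	return fmt.Errorf("cannot delete container %s (runtime %s): %w", id, r.name, errMissingRuntime)
}

func main() {
	r := &MissingRuntime{name: "crun"}
	if err := r.StartContainer("abc123"); err != nil {
		fmt.Println(err)
	}
	if errors.Is(r.DeleteContainer("abc123"), errMissingRuntime) {
		fmt.Println("runtime is gone, but the container record is still readable")
	}
}
```

The design point is that operations needing the missing runtime binary fail with a clear error, while Libpod can still load the container's record from its database and, for stopped containers, remove it as normal.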
Diffstat (limited to 'libpod/container_internal.go')
-rw-r--r--  libpod/container_internal.go  39
1 file changed, 36 insertions, 3 deletions
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index a7ac23f73..0043c9651 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -252,7 +252,7 @@ func (c *Container) waitForExitFileAndSync() error {
 		return err
 	}
-	if err := c.ociRuntime.UpdateContainerStatus(c, false); err != nil {
+	if err := c.checkExitFile(); err != nil {
 		return err
 	}
@@ -386,10 +386,11 @@ func (c *Container) syncContainer() error {
 		(c.state.State != define.ContainerStateConfigured) &&
 		(c.state.State != define.ContainerStateExited) {
 		oldState := c.state.State
-		// TODO: optionally replace this with a stat for the exit file
-		if err := c.ociRuntime.UpdateContainerStatus(c, false); err != nil {
+
+		if err := c.checkExitFile(); err != nil {
 			return err
 		}
+
 		// Only save back to DB if state changed
 		if c.state.State != oldState {
 			// Check for a restart policy match
@@ -1811,3 +1812,35 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume
 	}
 	return namedUserVolumes, userMounts
 }
+
+// Check for an exit file, and handle one if present
+func (c *Container) checkExitFile() error {
+	// If the container's not running, nothing to do.
+	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
+		return nil
+	}
+
+	exitFile, err := c.exitFilePath()
+	if err != nil {
+		return err
+	}
+
+	// Check for the exit file
+	info, err := os.Stat(exitFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// Container is still running, no error
+			return nil
+		}
+
+		return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+	}
+
+	// Alright, it exists. Transition to Stopped state.
+	c.state.State = define.ContainerStateStopped
+	c.state.PID = 0
+	c.state.ConmonPID = 0
+
+	// Read the exit file to get our stopped time and exit code.
+	return c.handleExitFile(exitFile, info)
+}
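The new checkExitFile helper hands the file off to the existing handleExitFile method, which is not part of this diff. As a hedged, simplified sketch of what handling a conmon-style exit file could involve, the snippet below reads the exit code as text and takes the file's timestamp as the finished time. The readExitFile name and the exit-file path used here are assumptions for illustration, not Libpod's code.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// readExitFile is an illustrative stand-in for Libpod's handleExitFile:
// the exit file is assumed to contain the container's exit code as text,
// and the file's modification time is taken as the finished time. The real
// method also saves the resulting state back to the database.
func readExitFile(exitFile string, info os.FileInfo) (int32, string, error) {
	contents, err := os.ReadFile(exitFile)
	if err != nil {
		return 0, "", fmt.Errorf("reading exit file %s: %w", exitFile, err)
	}

	code, err := strconv.ParseInt(strings.TrimSpace(string(contents)), 10, 32)
	if err != nil {
		return 0, "", fmt.Errorf("parsing exit code from %s: %w", exitFile, err)
	}

	return int32(code), info.ModTime().String(), nil
}

func main() {
	// The exit-file location is an assumption; conmon writes one file per
	// container, named by container ID.
	exitFile := filepath.Join(os.TempDir(), "exits", "abc123")

	info, err := os.Stat(exitFile)
	if err != nil {
		fmt.Println("no exit file (container may still be running):", err)
		return
	}

	code, finished, err := readExitFile(exitFile, info)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("container exited with code %d at %s\n", code, finished)
}
```

As in the diff above, the absence of the exit file is treated as "still running", so only a successful stat triggers the transition to the stopped state.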