author    Matthew Heon <matthew.heon@pm.me>  2019-05-03 10:35:48 -0400
committer Matthew Heon <matthew.heon@pm.me>  2019-05-03 10:36:16 -0400
commit    d7c367aa61850936249fc99461a92185cb6ba8aa (patch)
tree      bc07f5c401638ce59e107a5f13f8b87a06f6fd95 /libpod
parent    e1443fe05d146def61a5bb882ed0aafea5730d64 (diff)
Address review comments on restart policy
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
Diffstat (limited to 'libpod')
 libpod/container_api.go      | 25
 libpod/container_internal.go | 48
 2 files changed, 49 insertions(+), 24 deletions(-)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index faf01eccb..5bb610aab 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -601,23 +601,18 @@ func (c *Container) Cleanup(ctx context.Context) error {
return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
- // If we have a restart policy match when updating the state, we need to
- // restart the container.
- // However, perform a full validation of restart policy first.
- if c.state.RestartPolicyMatch {
- if c.config.RestartPolicy == RestartPolicyOnFailure && c.state.ExitCode != 0 {
- logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)",
- c.ID(), c.state.RestartCount, c.config.RestartRetries)
- }
- if (c.config.RestartPolicy == RestartPolicyOnFailure && c.state.ExitCode != 0 &&
- (c.config.RestartRetries > 0 && c.state.RestartCount < c.config.RestartRetries)) ||
- c.config.RestartPolicy == RestartPolicyAlways {
- // The container stopped. We need to restart it.
- return c.handleRestartPolicy(ctx)
- }
+ // Handle restart policy.
+ // Returns a bool indicating whether we actually restarted.
+ // If we did, don't proceed to cleanup - just exit.
+ didRestart, err := c.handleRestartPolicy(ctx)
+ if err != nil {
+ return err
+ }
+ if didRestart {
+ return nil
}
- // If we aren't hitting restart policy, we perform a normal cleanup
+ // If we didn't restart, we perform a normal cleanup
// Check if we have active exec sessions
if len(c.state.ExecSessions) != 0 {
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 00f916d0f..0dadcd1ca 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -213,20 +213,47 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
// Handle container restart policy.
// This is called when a container has exited, and was not explicitly stopped by
// an API call to stop the container or pod it is in.
-func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
+func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, err error) {
+ // If we did not get a restart policy match, exit immediately.
+ // Do the same if we're not a policy that restarts.
+ if !c.state.RestartPolicyMatch ||
+ c.config.RestartPolicy == RestartPolicyNo ||
+ c.config.RestartPolicy == RestartPolicyNone {
+ return false, nil
+ }
+
+ // If we're RestartPolicyOnFailure, we need to check retries and exit
+ // code.
+ if c.config.RestartPolicy == RestartPolicyOnFailure {
+ if c.state.ExitCode == 0 {
+ return false, nil
+ }
+
+ // If no max retry count is set, always continue; otherwise check retries
+ if c.config.RestartRetries > 0 {
+ if c.state.RestartCount < c.config.RestartRetries {
+ logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)",
+ c.ID(), c.state.RestartCount, c.config.RestartRetries)
+ } else {
+ logrus.Debugf("Container %s restart policy trigger: retries exhausted")
+ return false, nil
+ }
+ }
+ }
+
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
// Need to check if dependencies are alive.
if err = c.checkDependenciesAndHandleError(ctx); err != nil {
- return err
+ return false, err
}
// Is the container running again?
// If so, we don't have to do anything
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return nil
+ return false, nil
} else if c.state.State == ContainerStateUnknown {
- return errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
+ return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
}
c.newContainerEvent(events.Restart)
@@ -235,7 +262,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
c.state.RestartCount = c.state.RestartCount + 1
logrus.Debugf("Container %s now on retry %d", c.ID(), c.state.RestartCount)
if err := c.save(); err != nil {
- return err
+ return false, err
}
defer func() {
@@ -246,22 +273,25 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
}
}()
if err := c.prepare(); err != nil {
- return err
+ return false, err
}
if c.state.State == ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
- return err
+ return false, err
}
} else if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateExited {
// Initialize the container
if err := c.init(ctx, true); err != nil {
- return err
+ return false, err
}
}
- return c.start()
+ if err := c.start(); err != nil {
+ return false, err
+ }
+ return true, nil
}
// Sync this container with on-disk state and runtime status