author    Matthew Heon <matthew.heon@pm.me>    2019-04-01 15:22:32 -0400
committer Matthew Heon <matthew.heon@pm.me>    2019-05-03 10:36:16 -0400
commit    0d73ee40b2e95ec7d50c7ee72fcb24cae0e190a7 (patch)
tree      665382c42613e99258536536d61b00462a67ae53 /libpod
parent    3fb52f4fbb04781e32f72888c9e509dea5d7b434 (diff)
Add container restart policy to Libpod & Podman
This initial version does not support restart count, but it works as advertised otherwise.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
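For orientation before the diff: the restart decision this patch introduces can be summarized as a small predicate. The helper below is a hypothetical paraphrase of the logic added to Cleanup() and syncContainer(), not code from the patch itself.

// shouldRestart is an illustrative summary (not part of the patch) of when the
// new code restarts a container: only when a restartable policy is set, the
// container exited on its own rather than via an explicit Stop() call, and the
// policy's exit-code condition is met.
func shouldRestart(policy string, exitCode int32, stoppedByUser bool) bool {
	if stoppedByUser {
		return false
	}
	switch policy {
	case "always":
		return true
	case "on-failure":
		return exitCode != 0
	default: // "" and "no": never restart automatically
		return false
	}
}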
Diffstat (limited to 'libpod')
-rw-r--r--   libpod/container.go            27
-rw-r--r--   libpod/container_api.go        19
-rw-r--r--   libpod/container_internal.go   47
-rw-r--r--   libpod/options.go              35
4 files changed, 127 insertions(+), 1 deletion(-)
diff --git a/libpod/container.go b/libpod/container.go
index 570110ca1..4236f2456 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -181,7 +181,10 @@ type ContainerState struct {
BindMounts map[string]string `json:"bindMounts,omitempty"`
// StoppedByUser indicates whether the container was stopped by an
// explicit call to the Stop() API.
- StoppedByUser bool
+ StoppedByUser bool `json:"stoppedByUser,omitempty"`
+ // RestartPolicyMatch indicates whether the conditions for restart
+ // policy have been met.
+ RestartPolicyMatch bool `json:"restartPolicyMatch,omitempty"`
// ExtensionStageHooks holds hooks which will be executed by libpod
// and not delegated to the OCI runtime.
@@ -349,6 +352,17 @@ type ContainerConfig struct {
LogPath string `json:"logPath"`
// File containing the conmon PID
ConmonPidFile string `json:"conmonPidFile,omitempty"`
+ // RestartPolicy indicates what action the container will take upon
+ // exiting naturally.
+ // Allowed options are "no" (take no action), "on-failure" (restart on
+ // non-zero exit code, up to a maximum of RestartRetries times),
+ // and "always" (always restart the container on any exit code).
+ // The empty string is treated as the default ("no")
+ RestartPolicy string `json:"restart_policy,omitempty"`
+ // RestartRetries indicates the number of attempts that will be made to
+ // restart the container. Used only if RestartPolicy is set to
+ // "on-failure".
+ RestartRetries uint `json:"restart_retries,omitempty"`
// TODO log options for log drivers
PostConfigureNetNS bool `json:"postConfigureNetNS"`
@@ -732,6 +746,17 @@ func (c *Container) LogPath() string {
return c.config.LogPath
}
+// RestartPolicy returns the container's restart policy.
+func (c *Container) RestartPolicy() string {
+ return c.config.RestartPolicy
+}
+
+// RestartRetries returns the number of retries that will be attempted when
+// using the "on-failure" restart policy
+func (c *Container) RestartRetries() uint {
+ return c.config.RestartRetries
+}
+
// RuntimeName returns the name of the runtime
func (c *Container) RuntimeName() string {
return c.runtime.ociRuntime.name
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 5bfd869b3..46c913e99 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -583,6 +583,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
+
if err := c.syncContainer(); err != nil {
return err
}
@@ -593,6 +594,24 @@ func (c *Container) Cleanup(ctx context.Context) error {
return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
+ // If we have a restart policy match when updating the state, we need to
+ // restart the container.
+ // However, perform a full validation of restart policy first.
+ if c.state.RestartPolicyMatch {
+ // If restart policy is "on-failure" and the exit code is 0, we do
+ // nothing.
+ // TODO: if restart retries is set, handle that here.
+ if c.config.RestartRetries != 0 {
+ return errors.Wrapf(ErrNotImplemented, "restart retries not yet implemented")
+ }
+ if (c.config.RestartPolicy == "on-failure" && c.state.ExitCode != 0) || c.config.RestartPolicy == "always" {
+ // The container stopped. We need to restart it.
+ return c.handleRestartPolicy(ctx)
+ }
+ }
+
+ // If we aren't hitting restart policy, we perform a normal cleanup
+
// Check if we have active exec sessions
if len(c.state.ExecSessions) != 0 {
return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 0b9c5a96a..f72d86c84 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -210,6 +210,43 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
return nil
}
+// Handle container restart policy.
+// This is called when a container has exited, and was not explicitly stopped by
+// an API call to stop the container or pod it is in.
+func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
+ logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
+
+ // Need to check if dependencies are alive.
+ if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
+ }
+ }()
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
+ if c.state.State == ContainerStateStopped {
+ // Reinitialize the container if we need to
+ if err := c.reinit(ctx); err != nil {
+ return err
+ }
+ } else if c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateExited {
+ // Initialize the container
+ if err := c.init(ctx); err != nil {
+ return err
+ }
+ }
+ return c.start()
+}
+
// Sync this container with on-disk state and runtime status
// Should only be called with container lock held
// This function should suffice to ensure a container's state is accurate and
@@ -230,6 +267,14 @@ func (c *Container) syncContainer() error {
}
// Only save back to DB if state changed
if c.state.State != oldState {
+ // Check for a restart policy match
+ if c.config.RestartPolicy != "" && c.config.RestartPolicy != "no" &&
+ (oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
+ (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
+ !c.state.StoppedByUser {
+ c.state.RestartPolicyMatch = true
+ }
+
if err := c.save(); err != nil {
return err
}
@@ -377,6 +422,7 @@ func resetState(state *ContainerState) error {
state.NetworkStatus = nil
state.BindMounts = make(map[string]string)
state.StoppedByUser = false
+ state.RestartPolicyMatch = false
return nil
}
@@ -791,6 +837,7 @@ func (c *Container) init(ctx context.Context) error {
c.state.Exited = false
c.state.State = ContainerStateCreated
c.state.StoppedByUser = false
+ c.state.RestartPolicyMatch = false
if err := c.save(); err != nil {
return err
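The state-transition check added to syncContainer() above reads more easily as a standalone predicate. The helper below is illustrative only (the patch inlines this logic); the ContainerStatus type name is assumed from libpod's container state definitions.

// restartPolicyMatches restates the syncContainer() check: a restart is only
// considered when a restartable policy is configured and the container moved
// from a live state to a stopped one without an explicit user stop.
// Illustrative only; not part of the patch.
func restartPolicyMatches(policy string, oldState, newState ContainerStatus, stoppedByUser bool) bool {
	if policy == "" || policy == "no" {
		return false
	}
	wasLive := oldState == ContainerStateRunning || oldState == ContainerStatePaused
	nowStopped := newState == ContainerStateStopped || newState == ContainerStateExited
	return wasLive && nowStopped && !stoppedByUser
}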
diff --git a/libpod/options.go b/libpod/options.go
index 86c04db09..e83515822 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1239,6 +1239,41 @@ func WithUseImageHosts() CtrCreateOption {
}
}
+// WithRestartPolicy sets the container's restart policy. Valid values are
+// "no", "on-failure", and "always". The empty string is allowed, and will be
+// equivalent to "no".
+func WithRestartPolicy(policy string) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return ErrCtrFinalized
+ }
+
+ switch policy {
+ case "", "no", "on-failure", "always":
+ ctr.config.RestartPolicy = policy
+ default:
+ return errors.Wrapf(ErrInvalidArg, "%q is not a valid restart policy", policy)
+ }
+
+ return nil
+ }
+}
+
+// WithRestartRetries sets the number of retries to use when restarting a
+// container with the "on-failure" restart policy.
+// 0 is an allowed value, and indicates infinite retries.
+func WithRestartRetries(tries uint) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return ErrCtrFinalized
+ }
+
+ ctr.config.RestartRetries = tries
+
+ return nil
+ }
+}
+
// withIsInfra sets the container to be an infra container. This means the container will sometimes be hidden
// and is expected to be the first container in the pod.
func withIsInfra() CtrCreateOption {
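
Finally, a caller-side usage sketch (not part of the patch): the new options are ordinary CtrCreateOptions, so they would be passed when creating a container through the libpod Runtime. The Runtime.NewContainer signature and the import paths below are assumptions based on libpod's existing API at the time.

// Hypothetical usage sketch; assumes the existing libpod Runtime API and an
// already-built OCI runtime spec.
import (
	"context"

	"github.com/containers/libpod/libpod"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func createAlwaysRestartingContainer(ctx context.Context, rt *libpod.Runtime, ociSpec *spec.Spec) (*libpod.Container, error) {
	return rt.NewContainer(ctx, ociSpec,
		libpod.WithRestartPolicy("always"),
		// libpod.WithRestartRetries(3) is also available after this patch, but
		// any non-zero retry count is rejected at cleanup time in this version.
	)
}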