Diffstat (limited to 'pkg/adapter/containers.go')
-rw-r--r--  pkg/adapter/containers.go  179
1 file changed, 179 insertions, 0 deletions
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index a5b911da1..9ec897a60 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -697,3 +697,182 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
}
return pool.Run()
}
+
+// Restart restarts containers, with or without a timeout
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+ var (
+ containers []*libpod.Container
+ restartContainers []*libpod.Container
+ err error
+ )
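+ // An explicit --timeout/--time flag overrides each container's configured stop timeout below.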
+ useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+ inputTimeout := c.Timeout
+
+ // Handle --latest
+ if c.Latest {
+ lastCtr, err := r.Runtime.GetLatestContainer()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ restartContainers = append(restartContainers, lastCtr)
+ } else if c.Running {
+ containers, err = r.GetRunningContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else if c.All {
+ containers, err = r.Runtime.GetAllContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else {
+ for _, id := range c.InputArgs {
+ ctr, err := r.Runtime.LookupContainer(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, ctr)
+ }
+ }
+
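+ // Size the worker pool from the per-command default, honoring a global --max-workers override.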
+ maxWorkers := shared.DefaultPoolSize("restart")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalFlags.MaxWorks
+ }
+
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ // We now have a slice of all the containers to be restarted. Iterate over them
+ // to create restart Funcs, each with a timeout as needed.
+ pool := shared.NewPool("restart", maxWorkers, len(restartContainers))
+ for _, c := range restartContainers {
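+ // Copy the loop variable so each Job closure captures its own container.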
+ ctr := c
+ timeout := ctr.StopTimeout()
+ if useTimeout {
+ timeout = inputTimeout
+ }
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.RestartWithTimeout(ctx, timeout)
+ if err != nil {
+ logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// Top displays the running processes of a container
+func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
+ var (
+ descriptors []string
+ container *libpod.Container
+ err error
+ )
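+ // With --latest every argument is a descriptor; otherwise the first argument
+ // names the container and the remaining arguments are descriptors.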
+ if cli.Latest {
+ descriptors = cli.InputArgs
+ container, err = r.Runtime.GetLatestContainer()
+ } else {
+ descriptors = cli.InputArgs[1:]
+ container, err = r.Runtime.LookupContainer(cli.InputArgs[0])
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to lookup requested container")
+ }
+ return container.Top(descriptors)
+}
+
+// Prune removes stopped containers
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ err error
+ )
+
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
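+ // Only containers that are stopped or exited and not part of a pod are pruned.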
+ filter := func(c *libpod.Container) bool {
+ state, err := c.State()
+ if err != nil {
+ logrus.Error(err)
+ return false
+ }
+ if c.PodID() != "" {
+ return false
+ }
+ if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
+ return true
+ }
+ return false
+ }
+ delContainers, err := r.Runtime.GetContainers(filter)
+ if err != nil {
+ return ok, failures, err
+ }
+ if len(delContainers) < 1 {
+ return ok, failures, err
+ }
+ pool := shared.NewPool("prune", maxWorkers, len(delContainers))
+ for _, c := range delContainers {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := r.Runtime.RemoveContainer(ctx, ctr, force, false)
+ if err != nil {
+ logrus.Debugf("Failed to prune container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// CleanupContainers cleans up any leftover bits of stopped containers
+func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ return ok, failures, err
+ }
+
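+ // Either remove each container entirely or just run its cleanup, depending on
+ // whether removal was requested.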
+ for _, ctr := range ctrs {
+ if cli.Remove {
+ err = removeContainer(ctx, ctr, r)
+ } else {
+ err = cleanupContainer(ctx, ctr, r)
+ }
+
+ if err == nil {
+ ok = append(ok, ctr.ID())
+ } else {
+ failures[ctr.ID()] = err
+ }
+ }
+ return ok, failures, nil
+}
+
+func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil {
+ return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ }
+ return nil
+}
+
+func cleanupContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := ctr.Cleanup(ctx); err != nil {
+ return errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ }
+ return nil
+}
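Restart, Prune, and CleanupContainers above all hand back the same triple of succeeded IDs, per-container failures, and a fatal error. A minimal sketch, outside the diff, of how a caller might fold that triple into output and an exit status; the reportResults helper, its behaviour, and the sample data are assumptions for illustration, not podman's actual cmd-layer code:

package main

import (
	"fmt"
	"os"
)

// reportResults prints the IDs that succeeded, reports per-container failures
// on stderr, and collapses any failure into a single error the caller can turn
// into a non-zero exit code. Hypothetical helper, not part of the diff above.
func reportResults(ok []string, failures map[string]error, err error) error {
	if err != nil {
		return err
	}
	for _, id := range ok {
		fmt.Println(id)
	}
	for id, e := range failures {
		fmt.Fprintf(os.Stderr, "%s: %v\n", id, e)
	}
	if len(failures) > 0 {
		return fmt.Errorf("%d containers failed", len(failures))
	}
	return nil
}

func main() {
	// Dummy data standing in for what Restart/Prune/CleanupContainers return.
	ok := []string{"3f2a"}
	failures := map[string]error{"9ec8": fmt.Errorf("container is paused")}
	if err := reportResults(ok, failures, nil); err != nil {
		os.Exit(1)
	}
}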