Diffstat (limited to 'pkg/adapter/containers.go')
-rw-r--r--	pkg/adapter/containers.go	246
1 file changed, 226 insertions(+), 20 deletions(-)
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 1bca99cec..931c55a57 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -18,6 +18,7 @@ import (
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/pkg/adapter/shortcuts"
+	"github.com/containers/storage"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -62,52 +63,144 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
 		timeout = &t
 	}
-	var (
-		ok       = []string{}
-		failures = map[string]error{}
-	)
+	maxWorkers := shared.DefaultPoolSize("stop")
+	if cli.GlobalIsSet("max-workers") {
+		maxWorkers = cli.GlobalFlags.MaxWorks
+	}
+	logrus.Debugf("Setting maximum stop workers to %d", maxWorkers)
 	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
 	if err != nil {
-		return ok, failures, err
+		return nil, nil, err
 	}
+	pool := shared.NewPool("stop", maxWorkers, len(ctrs))
 	for _, c := range ctrs {
+		c := c
+
 		if timeout == nil {
 			t := c.StopTimeout()
 			timeout = &t
 			logrus.Debugf("Set timeout to container %s default (%d)", c.ID(), *timeout)
 		}
-		if err := c.StopWithTimeout(*timeout); err == nil {
-			ok = append(ok, c.ID())
-		} else if errors.Cause(err) == libpod.ErrCtrStopped {
-			ok = append(ok, c.ID())
-			logrus.Debugf("Container %s is already stopped", c.ID())
-		} else {
-			failures[c.ID()] = err
-		}
+
+		pool.Add(shared.Job{
+			c.ID(),
+			func() error {
+				err := c.StopWithTimeout(*timeout)
+				if err != nil {
+					if errors.Cause(err) == libpod.ErrCtrStopped {
+						logrus.Debugf("Container %s is already stopped", c.ID())
+						return nil
+					}
+					logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error())
+				}
+				return err
+			},
+		})
 	}
-	return ok, failures, nil
+	return pool.Run()
 }
 
 // KillContainers sends signal to container(s) based on CLI inputs.
 // Returns list of successful id(s), map of failed id(s) + error, or error not from container
 func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
+	maxWorkers := shared.DefaultPoolSize("kill")
+	if cli.GlobalIsSet("max-workers") {
+		maxWorkers = cli.GlobalFlags.MaxWorks
+	}
+	logrus.Debugf("Setting maximum kill workers to %d", maxWorkers)
+
+	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pool := shared.NewPool("kill", maxWorkers, len(ctrs))
+	for _, c := range ctrs {
+		c := c
+
+		pool.Add(shared.Job{
+			c.ID(),
+			func() error {
+				return c.Kill(uint(signal))
+			},
+		})
+	}
+	return pool.Run()
+}
+
+// RemoveContainers removes container(s) based on CLI inputs.
+func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
 	var (
 		ok       = []string{}
 		failures = map[string]error{}
 	)
+	maxWorkers := shared.DefaultPoolSize("rm")
+	if cli.GlobalIsSet("max-workers") {
+		maxWorkers = cli.GlobalFlags.MaxWorks
+	}
+	logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
 	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
 	if err != nil {
+		// Force may be used to remove containers no longer found in the database
+		if cli.Force && len(cli.InputArgs) > 0 && errors.Cause(err) == libpod.ErrNoSuchCtr {
+			r.RemoveContainersFromStorage(cli.InputArgs)
+		}
 		return ok, failures, err
 	}
+	pool := shared.NewPool("rm", maxWorkers, len(ctrs))
 	for _, c := range ctrs {
-		if err := c.Kill(uint(signal)); err == nil {
-			ok = append(ok, c.ID())
+		c := c
+
+		pool.Add(shared.Job{
+			c.ID(),
+			func() error {
+				err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
+				if err != nil {
+					logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
+				}
+				return err
+			},
+		})
+	}
+	return pool.Run()
+}
+
+// UmountRootFilesystems unmounts the root filesystem of container(s) based on CLI inputs.
+func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
+	var (
+		ok       = []string{}
+		failures = map[string]error{}
+	)
+
+	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+	if err != nil {
+		return ok, failures, err
+	}
+
+	for _, ctr := range ctrs {
+		state, err := ctr.State()
+		if err != nil {
+			logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error())
+			continue
+		}
+		if state == libpod.ContainerStateRunning {
+			logrus.Debugf("Error umounting container %s, is running", ctr.ID())
+			continue
+		}
+
+		if err := ctr.Unmount(cli.Force); err != nil {
+			if cli.All && errors.Cause(err) == storage.ErrLayerNotMounted {
+				logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
+				continue
+			}
+			failures[ctr.ID()] = errors.Wrapf(err, "error unmounting container %s", ctr.ID())
 		} else {
-			failures[c.ID()] = err
+			ok = append(ok, ctr.ID())
 		}
 	}
 	return ok, failures, nil
@@ -162,14 +255,17 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions)
 
 // CreateContainer creates a libpod container
 func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) {
-	results := shared.NewIntermediateLayer(&c.PodmanCommand)
+	results := shared.NewIntermediateLayer(&c.PodmanCommand, false)
 	ctr, _, err := shared.CreateContainer(ctx, &results, r.Runtime)
-	return ctr.ID(), err
+	if err != nil {
+		return "", err
+	}
+	return ctr.ID(), nil
 }
 
 // Run a libpod container
 func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) {
-	results := shared.NewIntermediateLayer(&c.PodmanCommand)
+	results := shared.NewIntermediateLayer(&c.PodmanCommand, false)
 
 	ctr, createConfig, err := shared.CreateContainer(ctx, &results, r.Runtime)
 	if err != nil {
@@ -304,3 +400,113 @@ func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
 
 	return exitCode, nil
 }
+
+// Ps lists containers based on CLI inputs.
+func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) {
+	maxWorkers := shared.Parallelize("ps")
+	if c.GlobalIsSet("max-workers") {
+		maxWorkers = c.GlobalFlags.MaxWorks
+	}
+	logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+	return shared.GetPsContainerOutput(r.Runtime, opts, c.Filter, maxWorkers)
+}
+
+// Attach attaches to a running container.
+func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error {
+	var (
+		ctr *libpod.Container
+		err error
+	)
+
+	if c.Latest {
+		ctr, err = r.Runtime.GetLatestContainer()
+	} else {
+		ctr, err = r.Runtime.LookupContainer(c.InputArgs[0])
+	}
+
+	if err != nil {
+		return errors.Wrapf(err, "unable to look up container to attach to")
+	}
+
+	conState, err := ctr.State()
+	if err != nil {
+		return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
+	}
+	if conState != libpod.ContainerStateRunning {
+		return errors.Errorf("you can only attach to running containers")
+	}
+
+	inputStream := os.Stdin
+	if c.NoStdin {
+		inputStream = nil
+	}
+	// If the container is in a pod, also set to recursively start dependencies
+	if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != libpod.ErrDetach {
+		return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+	}
+	return nil
+}
+
+// Checkpoint one or more containers
+func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.ContainerCheckpointOptions) error {
+	var (
+		containers     []*libpod.Container
+		err, lastError error
+	)
+
+	if c.All {
+		containers, err = r.Runtime.GetRunningContainers()
+	} else {
+		containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
+	}
+	if err != nil {
+		return err
+	}
+
+	for _, ctr := range containers {
+		if err = ctr.Checkpoint(context.TODO(), options); err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to checkpoint container %v", ctr.ID())
+		} else {
+			fmt.Println(ctr.ID())
+		}
+	}
+	return lastError
+}
+
+// Restore one or more containers
+func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+	var (
+		containers     []*libpod.Container
+		err, lastError error
+		filterFuncs    []libpod.ContainerFilter
+	)
+
+	filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+		state, _ := c.State()
+		return state == libpod.ContainerStateExited
+	})
+
+	if c.All {
+		containers, err = r.GetContainers(filterFuncs...)
+	} else {
+		containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
+	}
+	if err != nil {
+		return err
+	}
+
+	for _, ctr := range containers {
+		if err = ctr.Restore(context.TODO(), options); err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to restore container %v", ctr.ID())
+		} else {
+			fmt.Println(ctr.ID())
+		}
+	}
+	return lastError
+}
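
The stop, kill, and rm paths above all funnel work through shared.NewPool, shared.Job, and pool.Run() from cmd/podman/shared, whose implementation is not part of this diff. As a rough mental model of the semantics the diff relies on, the sketch below builds a minimal fan-out pool with the same shape: Add queues one job per container, Run executes the queued jobs on at most maxWorkers goroutines, and the result is the ([]string, map[string]error, error) triple of succeeded IDs plus per-ID failures that the adapter functions return. The internals here are illustrative assumptions, not podman's actual pool code.

package main

import (
	"fmt"
	"sync"
)

// Job pairs a container ID with the work to run for it, mirroring the two
// positional fields used in pool.Add(shared.Job{c.ID(), func() error {...}}).
type Job struct {
	ID string
	Fn func() error
}

// Pool queues Jobs and fans them out to a bounded number of workers.
type Pool struct {
	workers int
	jobs    []Job
}

func NewPool(name string, workers, size int) *Pool {
	if workers < 1 {
		workers = 1
	}
	return &Pool{workers: workers, jobs: make([]Job, 0, size)}
}

func (p *Pool) Add(j Job) { p.jobs = append(p.jobs, j) }

// Run executes all queued jobs on at most p.workers goroutines and returns
// the IDs that succeeded plus a map of ID -> error for the ones that failed.
func (p *Pool) Run() ([]string, map[string]error, error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		ok       []string
		failures = map[string]error{}
	)
	queue := make(chan Job)
	for i := 0; i < p.workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range queue {
				err := j.Fn()
				mu.Lock()
				if err != nil {
					failures[j.ID] = err
				} else {
					ok = append(ok, j.ID)
				}
				mu.Unlock()
			}
		}()
	}
	for _, j := range p.jobs {
		queue <- j
	}
	close(queue)
	wg.Wait()
	return ok, failures, nil
}

func main() {
	pool := NewPool("stop", 3, 5)
	for i := 0; i < 5; i++ {
		id := fmt.Sprintf("ctr-%d", i) // hypothetical IDs for the demo
		pool.Add(Job{id, func() error { return nil }})
	}
	ok, failures, _ := pool.Run()
	fmt.Println(len(ok), "succeeded,", len(failures), "failed")
}

Bounding the workers matters because each stop or rm can block for seconds (SIGTERM grace period, storage unmounts); an unbounded goroutine-per-container approach would spike resource usage on hosts with many containers.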
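Note also that each of the new loops rebinds the loop variable with c := c before capturing it in a Job closure. The rebind is essential: in Go versions before 1.22, a range loop reuses a single variable across iterations, so closures that capture it directly would all operate on the last container by the time the pool's workers actually run. A self-contained demonstration (hypothetical IDs, nothing from the diff):

package main

import "fmt"

func main() {
	ids := []string{"a", "b", "c"}

	var buggy, fixed []func() string
	for _, id := range ids {
		// Captures the loop variable itself; before Go 1.22 there is one
		// shared 'id' for the whole loop, so these all see its final value.
		buggy = append(buggy, func() string { return id })

		id := id // rebind, as the diff does with 'c := c'
		// Captures the per-iteration copy; correct on every Go version.
		fixed = append(fixed, func() string { return id })
	}

	for i := range ids {
		fmt.Printf("buggy=%s fixed=%s\n", buggy[i](), fixed[i]())
	}
	// On Go <= 1.21 every buggy call prints "c"; Go 1.22 made loop
	// variables per-iteration, so there both columns match.
}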