author    baude <bbaude@redhat.com>  2019-04-15 09:03:18 -0500
committer baude <bbaude@redhat.com>  2019-04-18 13:42:27 -0500
commit    55e630e7876557ebd2a44e81fa357aab9efbb793 (patch)
tree      cc8b6f224a6520e3c38bc41022abe40c6f1952a2 /pkg/adapter
parent    bf5ffdafb40f32fac891a8cd5fc64cfd5b77674f (diff)
podman-remote pause|unpause
Add the ability to pause and unpause containers with the remote client. Also turned on the pause tests!

Signed-off-by: baude <bbaude@redhat.com>
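For context, a hypothetical invocation against a remote podman (the container name "foo" is illustrative; the remote connection itself is assumed to already be configured):

    $ podman-remote pause foo
    $ podman-remote unpause foo
    $ podman-remote pause --all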
Diffstat (limited to 'pkg/adapter')
-rw-r--r--  pkg/adapter/containers.go         |  87
-rw-r--r--  pkg/adapter/containers_remote.go  | 110
2 files changed, 197 insertions(+), 0 deletions(-)
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 063ecfbfb..5279f11b2 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -607,3 +607,90 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
}
return exitCode, lastError
}
+
+// PauseContainers pauses container(s) based on CLI inputs.
+func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*libpod.Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("pause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ if cli.All {
+ ctrs, err = r.GetRunningContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ pool := shared.NewPool("pause", maxWorkers, len(ctrs))
+ for _, c := range ctrs {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.Pause()
+ if err != nil {
+ logrus.Debugf("Failed to pause container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// UnpauseContainers unpauses container(s) based on CLI inputs.
+func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*libpod.Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("pause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ if cli.All {
+ var filterFuncs []libpod.ContainerFilter
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == libpod.ContainerStatePaused
+ })
+ ctrs, err = r.GetContainers(filterFuncs...)
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ pool := shared.NewPool("pause", maxWorkers, len(ctrs))
+ for _, c := range ctrs {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.Unpause()
+ if err != nil {
+ logrus.Debugf("Failed to unpause container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
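The local PauseContainers/UnpauseContainers above fan the per-container calls out over a worker pool. The following is a minimal, self-contained sketch of that pattern; the job type and run function are simplified stand-ins written for illustration and are not the real shared.Job/shared.NewPool/pool.Run API:

// Simplified sketch of the bounded worker-pool pattern used by
// PauseContainers/UnpauseContainers; not the real shared package API.
package main

import (
	"fmt"
	"sync"
)

type job struct {
	id string
	fn func() error
}

// run executes the jobs with at most maxWorkers goroutines in flight and
// returns the IDs that succeeded plus a map of per-ID failures, mirroring
// the ([]string, map[string]error) convention used in pkg/adapter.
func run(jobs []job, maxWorkers int) ([]string, map[string]error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		ok       []string
		failures = map[string]error{}
	)
	sem := make(chan struct{}, maxWorkers) // limits concurrent workers
	for _, j := range jobs {
		j := j
		wg.Add(1)
		sem <- struct{}{}
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			err := j.fn()
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				failures[j.id] = err
			} else {
				ok = append(ok, j.id)
			}
		}()
	}
	wg.Wait()
	return ok, failures
}

func main() {
	jobs := []job{
		{id: "ctr1", fn: func() error { return nil }},
		{id: "ctr2", fn: func() error { return fmt.Errorf("container is not running") }},
	}
	ok, failures := run(jobs, 2)
	fmt.Println("ok:", ok, "failures:", failures)
}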
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index d5314c382..cb61871bf 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,18 @@ func (c *Container) ID() string {
return c.config.ID
}
+// Pause a container
+func (c *Container) Pause() error {
+ _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
+ return err
+}
+
+// Unpause a container
+func (c *Container) Unpause() error {
+ _, err := iopodman.UnpauseContainer().Call(c.Runtime.Conn, c.ID())
+ return err
+}
+
// Config returns a container config
func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
// TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
@@ -90,6 +102,19 @@ func (r *LocalRuntime) Spec(name string) (*specs.Spec, error) {
return &data, nil
}
+// LookupContainers is a wrapper for LookupContainer
+func (r *LocalRuntime) LookupContainers(idsOrNames []string) ([]*Container, error) {
+ var containers []*Container
+ for _, name := range idsOrNames {
+ ctr, err := r.LookupContainer(name)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, ctr)
+ }
+ return containers, nil
+}
+
// LookupContainer gets basic information about container over a varlink
// connection and then translates it to a *Container
func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
@@ -107,6 +132,24 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
}, nil
}
+func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
+ var containers []*Container
+ ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
+ if err != nil {
+ return nil, err
+ }
+ // This is not performance savvy; if this turns out to be a problematic series of lookups, we need to
+ // create a new endpoint to speed things up
+ for _, ctr := range ctrs {
+ container, err := r.LookupContainer(ctr.Id)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, container)
+ }
+ return containers, nil
+}
+
func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
reply, err := iopodman.GetContainersByContext().Call(r.Conn, false, true, nil)
if err != nil {
@@ -643,3 +686,70 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
}
return exitCode, finalErr
}
+
+// PauseContainers pauses container(s) based on CLI inputs.
+func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) {
+ var (
+ ok []string
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+
+ if cli.All {
+ filters := []string{libpod.ContainerStateRunning.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ } else {
+ ctrs, err = r.LookupContainers(cli.InputArgs)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ for _, c := range ctrs {
+ c := c
+ err := c.Pause()
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// UnpauseContainers unpauses container(s) based on CLI inputs.
+func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("unpause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ if cli.All {
+ filters := []string{libpod.ContainerStatePaused.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ } else {
+ ctrs, err = r.LookupContainers(cli.InputArgs)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+ for _, c := range ctrs {
+ c := c
+ err := c.Unpause()
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
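Both the local and remote implementations return the same (ok, failures, err) triple and leave output formatting to the caller in cmd/podman, which is outside this diff. A minimal sketch of such a caller follows; the function name, output format, and exit code are illustrative, not the actual cmd/podman handler:

// Illustrative consumer of the ([]string, map[string]error, error) return
// convention; the real cmd/podman reporting helper is not shown in this diff.
package main

import (
	"fmt"
	"os"
)

// reportResults prints the IDs that succeeded to stdout, the failures to
// stderr, and returns a non-nil error if any container failed.
func reportResults(ok []string, failures map[string]error, err error) error {
	if err != nil {
		return err
	}
	for _, id := range ok {
		fmt.Println(id)
	}
	if len(failures) == 0 {
		return nil
	}
	for id, ferr := range failures {
		fmt.Fprintf(os.Stderr, "%s: %v\n", id, ferr)
	}
	return fmt.Errorf("%d error(s) occurred", len(failures))
}

func main() {
	ok := []string{"ctr1"}
	failures := map[string]error{"ctr2": fmt.Errorf("container state improper")}
	if err := reportResults(ok, failures, nil); err != nil {
		os.Exit(125)
	}
}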