Diffstat (limited to 'pkg/adapter')
 pkg/adapter/containers.go        | 179
 pkg/adapter/containers_remote.go | 137
 pkg/adapter/runtime.go           |  17
 pkg/adapter/runtime_remote.go    |   6
 4 files changed, 325 insertions(+), 14 deletions(-)
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index a5b911da1..9ec897a60 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -697,3 +697,182 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
}
return pool.Run()
}
+
+// Restart containers with or without a timeout
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+ var (
+ containers []*libpod.Container
+ restartContainers []*libpod.Container
+ err error
+ )
+ useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+ inputTimeout := c.Timeout
+
+ // Handle --latest
+ if c.Latest {
+ lastCtr, err := r.Runtime.GetLatestContainer()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ restartContainers = append(restartContainers, lastCtr)
+ } else if c.Running {
+ containers, err = r.GetRunningContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else if c.All {
+ containers, err = r.Runtime.GetAllContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else {
+ for _, id := range c.InputArgs {
+ ctr, err := r.Runtime.LookupContainer(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, ctr)
+ }
+ }
+
+ maxWorkers := shared.DefaultPoolSize("restart")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalFlags.MaxWorks
+ }
+
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ // We now have a slice of all the containers to be restarted. Iterate over
+ // them to create restart Funcs, applying the timeout as needed.
+ pool := shared.NewPool("restart", maxWorkers, len(restartContainers))
+ for _, c := range restartContainers {
+ ctr := c
+ timeout := ctr.StopTimeout()
+ if useTimeout {
+ timeout = inputTimeout
+ }
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.RestartWithTimeout(ctx, timeout)
+ if err != nil {
+ logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
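The local restart path above fans the work out to a pool: one shared.Job per container, with `ctr := c` taking a per-iteration copy so each closure captures its own container rather than the shared loop variable. Below is a minimal standalone sketch of the same fan-out/collect pattern, using only the standard library in place of the `shared` pool; all names are illustrative, not part of this diff.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // job mirrors the shape of shared.Job: an ID plus the work to run.
    type job struct {
    	id string
    	fn func() error
    }

    // runPool executes jobs on at most maxWorkers goroutines and collects
    // successes and failures, like pool.Run() in the diff above.
    func runPool(maxWorkers int, jobs []job) ([]string, map[string]error) {
    	var (
    		mu       sync.Mutex
    		wg       sync.WaitGroup
    		ok       []string
    		failures = map[string]error{}
    	)
    	sem := make(chan struct{}, maxWorkers) // bounds concurrency
    	for _, j := range jobs {
    		j := j // per-iteration copy, same trick as ctr := c above
    		wg.Add(1)
    		sem <- struct{}{}
    		go func() {
    			defer wg.Done()
    			defer func() { <-sem }()
    			err := j.fn() // run the work outside the lock
    			mu.Lock()
    			defer mu.Unlock()
    			if err != nil {
    				failures[j.id] = err
    			} else {
    				ok = append(ok, j.id)
    			}
    		}()
    	}
    	wg.Wait()
    	return ok, failures
    }

    func main() {
    	ok, failures := runPool(2, []job{
    		{id: "ctr1", fn: func() error { return nil }},
    		{id: "ctr2", fn: func() error { return fmt.Errorf("restart failed") }},
    	})
    	fmt.Println(ok, failures)
    }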
+// Top displays the running processes of a container
+func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
+ var (
+ descriptors []string
+ container *libpod.Container
+ err error
+ )
+ if cli.Latest {
+ descriptors = cli.InputArgs
+ container, err = r.Runtime.GetLatestContainer()
+ } else {
+ descriptors = cli.InputArgs[1:]
+ container, err = r.Runtime.LookupContainer(cli.InputArgs[0])
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to look up requested container")
+ }
+ return container.Top(descriptors)
+}
+
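Top's argument handling is positional: with --latest every CLI arg is a ps-style descriptor, while otherwise the first arg names the container and the rest are descriptors. A small sketch of that split; the helper name is illustrative, not in the diff.

    // splitTopArgs mirrors the branch above: under --latest all args are
    // descriptors; otherwise args[0] is the container to inspect.
    func splitTopArgs(latest bool, args []string) (target string, descriptors []string) {
    	if latest {
    		return "", args
    	}
    	return args[0], args[1:]
    }

    // splitTopArgs(false, []string{"myctr", "pid", "comm"})
    //   -> target "myctr", descriptors ["pid", "comm"]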
+// Prune removes stopped containers
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ err error
+ )
+
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ filter := func(c *libpod.Container) bool {
+ state, err := c.State()
+ if err != nil {
+ logrus.Error(err)
+ return false
+ }
+ if c.PodID() != "" {
+ return false
+ }
+ if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
+ return true
+ }
+ return false
+ }
+ delContainers, err := r.Runtime.GetContainers(filter)
+ if err != nil {
+ return ok, failures, err
+ }
+ if len(delContainers) < 1 {
+ return ok, failures, err
+ }
+ pool := shared.NewPool("prune", maxWorkers, len(delContainers))
+ for _, c := range delContainers {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := r.Runtime.RemoveContainer(ctx, ctr, force, false)
+ if err != nil {
+ logrus.Debugf("Failed to prune container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
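Prune selects its candidates with a filter closure handed to GetContainers: pod members are skipped, and only containers in the Stopped or Exited state qualify. Here is a standalone sketch of the same predicate, with illustrative stand-in types for libpod's.

    type ctrState int

    const (
    	stateRunning ctrState = iota
    	stateStopped
    	stateExited
    )

    type ctrInfo struct {
    	podID string
    	state ctrState
    }

    // prunable mirrors the filter above: never prune a pod member, and
    // only prune containers that have stopped or exited.
    func prunable(c ctrInfo) bool {
    	if c.podID != "" {
    		return false
    	}
    	return c.state == stateStopped || c.state == stateExited
    }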
+// CleanupContainers cleans up any leftover bits of stopped containers
+func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ return ok, failures, err
+ }
+
+ for _, ctr := range ctrs {
+ if cli.Remove {
+ err = removeContainer(ctx, ctr, r)
+ } else {
+ err = cleanupContainer(ctx, ctr, r)
+ }
+
+ if err == nil {
+ ok = append(ok, ctr.ID())
+ } else {
+ failures[ctr.ID()] = err
+ }
+ }
+ return ok, failures, nil
+}
+
+func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil {
+ return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ }
+ return nil
+}
+
+func cleanupContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := ctr.Cleanup(ctx); err != nil {
+ return errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ }
+ return nil
+}
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index cb61871bf..a3a48a564 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,12 @@ func (c *Container) ID() string {
return c.config.ID
}
+// Restart a single container
+func (c *Container) Restart(timeout int64) error {
+ _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout)
+ return err
+}
+
// Pause a container
func (c *Container) Pause() error {
_, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
@@ -132,6 +138,23 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
}, nil
}
+// GetAllContainers returns all containers in a slice
+func (r *LocalRuntime) GetAllContainers() ([]*Container, error) {
+ var containers []*Container
+ ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{})
+ if err != nil {
+ return nil, err
+ }
+ for _, ctr := range ctrs {
+ container, err := r.LookupContainer(ctr)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, container)
+ }
+ return containers, nil
+}
+
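The remote implementations all share one shape: a generated varlink binding (`iopodman.X().Call(r.Conn, ...)`) returns container IDs, which are then rehydrated into *Container values one lookup at a time, as GetAllContainers does above. A sketch of that rehydration step pulled out as a helper; this function is illustrative and not part of the diff.

    // lookupAll is illustrative only: turn the IDs a varlink call returns
    // into Container handles, failing fast on the first bad lookup.
    func (r *LocalRuntime) lookupAll(ids []string) ([]*Container, error) {
    	containers := make([]*Container, 0, len(ids))
    	for _, id := range ids {
    		ctr, err := r.LookupContainer(id)
    		if err != nil {
    			return nil, err
    		}
    		containers = append(containers, ctr)
    	}
    	return containers, nil
    }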
func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
var containers []*Container
ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
@@ -561,7 +584,10 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
}
inputStream := os.Stdin
if c.NoStdin {
- inputStream = nil
+ inputStream, err = os.Open(os.DevNull)
+ if err != nil {
+ return err
+ }
}
errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false, c.DetachKeys)
if err != nil {
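The --no-stdin change above is worth a note: the remote attach path cannot safely take a nil reader, so the code now opens os.DevNull, giving attach a real *os.File that simply reads as empty. A minimal sketch of the behavior:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    )

    func main() {
    	// A real file handle that yields immediate EOF, instead of a nil
    	// reader the attach loop would have to special-case.
    	inputStream, err := os.Open(os.DevNull)
    	if err != nil {
    		panic(err)
    	}
    	defer inputStream.Close()

    	data, err := ioutil.ReadAll(inputStream)
    	fmt.Printf("read %d bytes, err=%v\n", len(data), err) // read 0 bytes, err=<nil>
    }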
@@ -753,3 +779,112 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
}
return ok, failures, nil
}
+
+// Restart restarts one or more containers over varlink
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+ var (
+ containers []*Container
+ restartContainers []*Container
+ err error
+ ok = []string{}
+ failures = map[string]error{}
+ )
+ useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+ inputTimeout := c.Timeout
+
+ if c.Latest {
+ lastCtr, err := r.GetLatestContainer()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ restartContainers = append(restartContainers, lastCtr)
+ } else if c.Running {
+ containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else if c.All {
+ containers, err = r.GetAllContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else {
+ for _, id := range c.InputArgs {
+ ctr, err := r.LookupContainer(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, ctr)
+ }
+ }
+
+ for _, c := range restartContainers {
+ c := c
+ timeout := c.config.StopTimeout
+ if useTimeout {
+ timeout = inputTimeout
+ }
+ err := c.Restart(int64(timeout))
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// Top displays the running processes of a container
+func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
+ var (
+ ctr *Container
+ err error
+ descriptors []string
+ )
+ if cli.Latest {
+ ctr, err = r.GetLatestContainer()
+ descriptors = cli.InputArgs
+ } else {
+ ctr, err = r.LookupContainer(cli.InputArgs[0])
+ descriptors = cli.InputArgs[1:]
+ }
+ if err != nil {
+ return nil, err
+ }
+ return iopodman.Top().Call(r.Conn, ctr.ID(), descriptors)
+}
+
+// Prune removes stopped containers
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ filters := []string{libpod.ContainerStateExited.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ if err != nil {
+ return ok, failures, err
+ }
+ for _, c := range ctrs {
+ c := c
+ _, err := iopodman.RemoveContainer().Call(r.Conn, c.ID(), false, false)
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// CleanupContainers cleans up leftover bits of stopped containers; it is not supported for remote clients
+func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) {
+ return nil, nil, errors.New("container cleanup not supported for remote clients")
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 753f7c944..0d840d65b 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -5,6 +5,7 @@ package adapter
import (
"bufio"
"context"
+ "github.com/containers/libpod/cmd/podman/shared"
"io"
"io/ioutil"
"os"
@@ -17,7 +18,6 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
@@ -57,8 +57,8 @@ type Volume struct {
type VolumeFilter func(*Volume) bool
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
- runtime, err := libpodruntime.GetRuntime(c)
+func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ runtime, err := libpodruntime.GetRuntime(ctx, c)
if err != nil {
return nil, err
}
@@ -119,8 +119,8 @@ func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, for
}
// PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
+ return r.ImageRuntime().PruneImages(ctx, all)
}
// Export is a wrapper to container export to a tarfile
@@ -322,10 +322,6 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
fromStart bool
eventsError error
)
- options, err := shared.GenerateEventOptions(c.Filter, c.Since, c.Until)
- if err != nil {
- return errors.Wrapf(err, "unable to generate event options")
- }
tmpl, err := template.New("events").Parse(c.Format)
if err != nil {
return err
@@ -335,7 +331,8 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
}
eventChannel := make(chan *events.Event)
go func() {
- eventsError = r.Runtime.Events(fromStart, c.Stream, options, eventChannel)
+ readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until}
+ eventsError = r.Runtime.Events(readOpts)
}()
if eventsError != nil {
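The Events change replaces the pre-built options slice with a single events.ReadOptions struct, pushing filter parsing down into the runtime; the caller now just fills in the fields visible in this hunk and reads from the channel. A hedged sketch of such a caller, assuming an already-constructed *libpod.Runtime (the filter string is illustrative):

    // followEvents streams events from an already-constructed runtime.
    func followEvents(runtime *libpod.Runtime) {
    	eventChannel := make(chan *events.Event)
    	readOpts := events.ReadOptions{
    		FromStart:    false, // only events from now on
    		Stream:       true,  // keep the channel open
    		Filters:      []string{"event=start"}, // illustrative filter
    		EventChannel: eventChannel,
    		Since:        "", // no lower time bound
    		Until:        "", // no upper time bound
    	}
    	go func() {
    		// error handling elided in this sketch
    		_ = runtime.Events(readOpts)
    	}()
    	for event := range eventChannel {
    		fmt.Println(event)
    	}
    }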
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index dcb0924ce..6102daccf 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -46,7 +46,7 @@ type LocalRuntime struct {
}
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime := RemoteRuntime{}
conn, err := runtime.Connect()
if err != nil {
@@ -256,7 +256,7 @@ func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authf
// IsParent goes through the layers in the store and checks if i.TopLayer is
// the parent of any other layer in store. Double check that image with that
// layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
+func (ci *ContainerImage) IsParent(context.Context) (bool, error) {
return ci.remoteImage.isParent, nil
}
@@ -338,7 +338,7 @@ func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error)
}
// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
return iopodman.ImagesPrune().Call(r.Conn, all)
}
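The remaining hunks thread context.Context through GetRuntime, PruneImages, and IsParent so the local and remote backends keep identical signatures. A hedged sketch of what a call site looks like after this change; the command wiring here is illustrative, only the GetRuntime and PruneImages signatures come from the diff.

    func pruneImages(c *cliconfig.PodmanCommand, all bool) error {
    	ctx := context.Background() // callers now supply a context
    	runtime, err := adapter.GetRuntime(ctx, c)
    	if err != nil {
    		return err
    	}
    	pruned, err := runtime.PruneImages(ctx, all)
    	if err != nil {
    		return err
    	}
    	for _, id := range pruned {
    		fmt.Println(id)
    	}
    	return nil
    }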