author    baude <bbaude@redhat.com>  2018-12-01 09:49:46 -0600
committer baude <bbaude@redhat.com>  2018-12-05 19:57:54 -0600
commit    e037427035dc57e536478362fc19e30a400bc327 (patch)
tree      3fb14dc437f58f29e197a43aaefa0e0254251f43 /cmd/podman/containers_prune.go
parent    75b19ca8abe1957f3c48035767960a6b20c10519 (diff)
Add ability to prune containers and images
Allow the user to prune unused/unnamed images, i.e. the intermediate layer images left over from building, via podman rmi --prune. Allow the user to prune stopped/exited containers via podman rm --prune.

This should resolve #1910.

Signed-off-by: baude <bbaude@redhat.com>
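For illustration, the invocations described above look like the following. The flag names are taken from this commit message; the same action is also registered as podman container prune, per the description string in the diff below.

    $ podman rm --prune     # remove stopped/exited containers not in a pod
    $ podman rmi --prune    # remove unused/unnamed layer images from building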
Diffstat (limited to 'cmd/podman/containers_prune.go')
-rw-r--r--  cmd/podman/containers_prune.go  78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
new file mode 100644
index 000000000..92604e82f
--- /dev/null
+++ b/cmd/podman/containers_prune.go
@@ -0,0 +1,78 @@
+package main
+
+import (
+    "github.com/containers/libpod/cmd/podman/libpodruntime"
+    "github.com/containers/libpod/cmd/podman/shared"
+    "github.com/containers/libpod/libpod"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+    "github.com/urfave/cli"
+)
+
+var (
+    pruneContainersDescription = `
+   podman container prune
+
+   Removes all stopped and exited containers
+`
+
+    pruneContainersCommand = cli.Command{
+        Name:         "prune",
+        Usage:        "Remove all stopped containers",
+        Description:  pruneContainersDescription,
+        Action:       pruneContainersCmd,
+        OnUsageError: usageErrorHandler,
+    }
+)
+
+func pruneContainersCmd(c *cli.Context) error {
+    var (
+        deleteFuncs []shared.ParallelWorkerInput
+    )
+
+    ctx := getContext()
+    runtime, err := libpodruntime.GetRuntime(c)
+    if err != nil {
+        return errors.Wrapf(err, "could not get runtime")
+    }
+    defer runtime.Shutdown(false)
+
+    // Prune stopped containers, and exited containers that are not
+    // part of a pod; check the state error instead of ignoring it.
+    filter := func(c *libpod.Container) bool {
+        state, err := c.State()
+        if err != nil {
+            logrus.Error(err)
+            return false
+        }
+        return state == libpod.ContainerStateStopped ||
+            (state == libpod.ContainerStateExited && c.PodID() == "")
+    }
+    delContainers, err := runtime.GetContainers(filter)
+    if err != nil {
+        return err
+    }
+    if len(delContainers) == 0 {
+        return nil
+    }
+    for _, container := range delContainers {
+        con := container // capture the loop variable for the closure below
+        f := func() error {
+            return runtime.RemoveContainer(ctx, con, c.Bool("force"))
+        }
+
+        deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
+            ContainerID:  con.ID(),
+            ParallelFunc: f,
+        })
+    }
+    maxWorkers := shared.Parallelize("rm")
+    if c.GlobalIsSet("max-workers") {
+        maxWorkers = c.GlobalInt("max-workers")
+    }
+    logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+    // Run the removal funcs in parallel
+    deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
+    return printParallelOutput(deleteErrors, errCount)
+}
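The removal work above is fanned out through shared.ParallelExecuteWorkerPool, whose implementation lives in cmd/podman/shared and is not part of this diff. The following is a minimal, self-contained sketch of that fan-out pattern, under the assumption that it bounds concurrency at maxWorkers and reports per-container errors; the names workerInput and executeWorkerPool are hypothetical stand-ins, not the actual podman API.

// parallelprune_sketch.go - hypothetical illustration only; not podman code.
package main

import (
    "fmt"
    "sync"
)

// workerInput pairs a container ID with the function that removes it,
// mirroring the assumed shape of shared.ParallelWorkerInput above.
type workerInput struct {
    ContainerID  string
    ParallelFunc func() error
}

// executeWorkerPool runs each job on at most maxWorkers goroutines and
// returns a map of container ID -> error plus the error count, roughly
// the contract pruneContainersCmd expects from ParallelExecuteWorkerPool.
func executeWorkerPool(maxWorkers int, jobs []workerInput) (map[string]error, int) {
    var (
        mu       sync.Mutex
        wg       sync.WaitGroup
        errCount int
    )
    results := make(map[string]error, len(jobs))
    sem := make(chan struct{}, maxWorkers) // channel-as-semaphore bounds concurrency

    for _, job := range jobs {
        job := job // capture the loop variable, as the diff does with con := container
        wg.Add(1)
        go func() {
            defer wg.Done()
            sem <- struct{}{}        // acquire a worker slot
            defer func() { <-sem }() // release it when done
            err := job.ParallelFunc()
            mu.Lock()
            results[job.ContainerID] = err
            if err != nil {
                errCount++
            }
            mu.Unlock()
        }()
    }
    wg.Wait()
    return results, errCount
}

func main() {
    jobs := []workerInput{
        {ContainerID: "abc123", ParallelFunc: func() error { return nil }},
        {ContainerID: "def456", ParallelFunc: func() error { return fmt.Errorf("simulated removal failure") }},
    }
    errs, n := executeWorkerPool(2, jobs)
    fmt.Printf("%d error(s): %v\n", n, errs)
}

The semaphore keeps at most maxWorkers removals in flight at once, which matches how the command honors the global --max-workers override before dispatching the delete functions.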