author    Peter Hunt <pehunt@redhat.com>    2019-03-15 17:41:03 -0400
committer Peter Hunt <pehunt@redhat.com>    2019-04-16 11:23:18 -0400
commit    0b34b4a59cf090a47a2a13cc4814954c497b3d49 (patch)
tree      22e5185775ae83766d6911dd6b3d6b8376a976e1 /cmd/podman/pods_prune.go
parent    a2e9626d92dedb182a500c3a0f04dcc0499a6d54 (diff)
Add podman pod prune
podman system prune would leave pods alone, not pruning them even when they were stopped. Fix this by adding a `podman pod prune` command that prunes stopped pods, similar to how stopped containers are pruned.

Signed-off-by: Peter Hunt <pehunt@redhat.com>
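With this change, the new command can be invoked as below. Both forms come straight from the flags wired up in this diff; --force also removes pods whose containers are still running, since force is passed through to RemovePod:

	$ podman pod prune
	$ podman pod prune --force

prunePodsCmd additionally honors the global --max-workers flag to cap how many pods are removed in parallel (e.g. `podman --max-workers 2 pod prune`), falling back to shared.Parallelize("rm") when it is unset.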
Diffstat (limited to 'cmd/podman/pods_prune.go')
-rw-r--r--  cmd/podman/pods_prune.go  |  91
1 file changed, 91 insertions, 0 deletions
diff --git a/cmd/podman/pods_prune.go b/cmd/podman/pods_prune.go
new file mode 100644
index 000000000..4ffe6fc27
--- /dev/null
+++ b/cmd/podman/pods_prune.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+ "context"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var (
+ prunePodsCommand cliconfig.PrunePodsValues
+ prunePodsDescription = `
+ podman pod prune
+
+ Removes all stopped pods
+`
+ _prunePodsCommand = &cobra.Command{
+ Use: "prune",
+ Args: noSubArgs,
+ Short: "Remove all stopped pods",
+ Long: prunePodsDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ prunePodsCommand.InputArgs = args
+ prunePodsCommand.GlobalFlags = MainGlobalOpts
+ return prunePodsCmd(&prunePodsCommand)
+ },
+ }
+)
+
+func init() {
+ prunePodsCommand.Command = _prunePodsCommand
+ prunePodsCommand.SetHelpTemplate(HelpTemplate())
+ prunePodsCommand.SetUsageTemplate(UsageTemplate())
+ flags := prunePodsCommand.Flags()
+ flags.BoolVarP(&prunePodsCommand.Force, "force", "f", false, "Force removal of running pods. The default is false")
+}
+
+func prunePods(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
+ var deleteFuncs []shared.ParallelWorkerInput
+
+ filter := func(p *libpod.Pod) bool {
+ state, err := shared.GetPodStatus(p)
+ // skip pods whose status could not be determined
+ if err != nil {
+ return false
+ }
+ return state == shared.PodStateStopped || state == shared.PodStateExited
+ }
+ delPods, err := runtime.Pods(filter)
+ if err != nil {
+ return err
+ }
+ if len(delPods) < 1 {
+ return nil
+ }
+ for _, pod := range delPods {
+ p := pod // reassign so each closure captures its own pod, not the shared loop variable
+ f := func() error {
+ return runtime.RemovePod(ctx, p, force, force)
+ }
+
+ deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
+ ContainerID: p.ID(),
+ ParallelFunc: f,
+ })
+ }
+ // Run the parallel funcs
+ deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
+ return printParallelOutput(deleteErrors, errCount)
+}
+
+func prunePodsCmd(c *cliconfig.PrunePodsValues) error {
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
+ if err != nil {
+ return errors.Wrap(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ maxWorkers := shared.Parallelize("rm")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ return prunePods(runtime, getContext(), maxWorkers, c.Bool("force"))
+}
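The actual fan-out happens in shared.ParallelExecuteWorkerPool, which is outside this diff. As a minimal sketch of the pattern it is used for here (per-ID closures run on a bounded number of goroutines, yielding an error map and a failure count), the following self-contained Go program uses stand-in names (workerInput, parallelExecute); it is an illustration, not the libpod implementation:

	package main

	import (
		"fmt"
		"sync"
	)

	// workerInput mirrors the shape of shared.ParallelWorkerInput in the
	// diff: an ID plus a closure to run. These types are hypothetical.
	type workerInput struct {
		ID   string
		Func func() error
	}

	// parallelExecute runs each input on at most maxWorkers goroutines and
	// returns an ID->error map plus a failure count, echoing the
	// (deleteErrors, errCount) pair that prunePods consumes.
	func parallelExecute(maxWorkers int, inputs []workerInput) (map[string]error, int) {
		var (
			mu     sync.Mutex
			wg     sync.WaitGroup
			errs   = make(map[string]error)
			errCnt int
		)
		sem := make(chan struct{}, maxWorkers) // bounds concurrency
		for _, in := range inputs {
			in := in // fresh variable per iteration, like p := pod in the diff
			wg.Add(1)
			sem <- struct{}{} // acquire a worker slot
			go func() {
				defer wg.Done()
				defer func() { <-sem }() // release the slot
				if err := in.Func(); err != nil {
					mu.Lock()
					errs[in.ID] = err
					errCnt++
					mu.Unlock()
				}
			}()
		}
		wg.Wait()
		return errs, errCnt
	}

	func main() {
		inputs := []workerInput{
			{ID: "pod-a", Func: func() error { return nil }},
			{ID: "pod-b", Func: func() error { return fmt.Errorf("pod in use") }},
		}
		errs, n := parallelExecute(2, inputs)
		fmt.Printf("%d failure(s): %v\n", n, errs)
	}

The sketch also shows why the diff reassigns p := pod before building each closure: before Go 1.22, the loop variable is reused across iterations, so closures capturing pod directly would all end up removing the last pod.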