summaryrefslogtreecommitdiff
path: root/cmd/podman/containers_prune.go
blob: abc56cee12afbc7b82173c48ab994a150501d96b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
package main

import (
	"context"

	"github.com/containers/libpod/cmd/podman/cliconfig"
	"github.com/containers/libpod/cmd/podman/shared"
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/adapter"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

var (
	// pruneContainersCommand holds the parsed CLI values for
	// "podman container prune" (populated by the RunE hook below).
	pruneContainersCommand     cliconfig.PruneContainersValues
	// pruneContainersDescription is the long help text for the command.
	pruneContainersDescription = `
	podman container prune

	Removes all exited containers
`
	// _pruneContainersCommand is the cobra definition; it copies the
	// invocation state into pruneContainersCommand and dispatches to
	// pruneContainersCmd.
	_pruneContainersCommand = &cobra.Command{
		Use:   "prune",
		Args:  noSubArgs, // the command accepts no positional arguments
		Short: "Remove all stopped containers",
		Long:  pruneContainersDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			pruneContainersCommand.InputArgs = args
			pruneContainersCommand.GlobalFlags = MainGlobalOpts
			pruneContainersCommand.Remote = remoteclient
			return pruneContainersCmd(&pruneContainersCommand)
		},
	}
)

// init wires the prune subcommand into the CLI: it attaches the cobra
// command object, installs the shared help/usage templates, and
// registers the --force/-f flag.
func init() {
	pruneContainersCommand.Command = _pruneContainersCommand
	pruneContainersCommand.SetHelpTemplate(HelpTemplate())
	pruneContainersCommand.SetUsageTemplate(UsageTemplate())
	fs := pruneContainersCommand.Flags()
	fs.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container.  The default is false")
}

// pruneContainers removes all stopped or exited containers that are not
// members of a pod, deleting them concurrently with up to maxWorkers
// workers.
//
// ctx is forwarded to each RemoveContainer call; force and volumes are
// passed through as well (force removal, and removal of associated
// volumes). Per-container removal errors are aggregated and reported
// via printParallelOutput.
func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force, volumes bool) error {
	var deleteFuncs []shared.ParallelWorkerInput

	// Select containers eligible for pruning. The previous filter only
	// applied the State() error check and the pod-membership check on
	// the "exited" branch, so a stopped container inside a pod (or one
	// whose state lookup failed) could be pruned by accident. Apply
	// both guards uniformly before looking at the state.
	filter := func(c *libpod.Container) bool {
		state, err := c.State()
		if err != nil {
			logrus.Error(err)
			return false
		}
		// Never prune containers that belong to a pod.
		if c.PodID() != "" {
			return false
		}
		return state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited
	}
	delContainers, err := runtime.GetContainers(filter)
	if err != nil {
		return err
	}
	if len(delContainers) < 1 {
		// Nothing to prune.
		return nil
	}
	for _, container := range delContainers {
		con := container // per-iteration copy so each closure removes its own container
		f := func() error {
			return runtime.RemoveContainer(ctx, con, force, volumes)
		}

		deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
			ContainerID:  con.ID(),
			ParallelFunc: f,
		})
	}
	// Run the removals concurrently and aggregate any errors.
	deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
	return printParallelOutput(deleteErrors, errCount)
}

// pruneContainersCmd is the CLI entry point for "podman container prune".
// It constructs the runtime, determines the worker-pool size (honoring a
// globally-set --max-workers override), and delegates the actual work to
// pruneContainers.
func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
	runtime, err := adapter.GetRuntime(&c.PodmanCommand)
	if err != nil {
		return errors.Wrapf(err, "could not get runtime")
	}
	defer runtime.Shutdown(false)

	// Default the pool size from the shared "rm" heuristic unless the
	// user explicitly set --max-workers.
	workerCount := shared.Parallelize("rm")
	if c.GlobalIsSet("max-workers") {
		workerCount = c.GlobalFlags.MaxWorks
	}
	logrus.Debugf("Setting maximum workers to %d", workerCount)

	return pruneContainers(runtime, getContext(), workerCount, c.Bool("force"), c.Bool("volumes"))
}