path: root/cmd/podman/containers_prune.go
author     Daniel J Walsh <dwalsh@redhat.com>  2019-02-14 13:21:52 -0500
committer  Daniel J Walsh <dwalsh@redhat.com>  2019-02-14 13:21:52 -0500
commit     52df1fa7e054d577e8416d1d46db1741ad324d4a (patch)
tree       2d1212425257a8d332f4400d0896aa2e465176b0 /cmd/podman/containers_prune.go
parent     dd82acd8ba02be51ec5fea65584e1f7b2036d7c8 (diff)
Fix volume handling in podman
Fix builtin volumes to work with podman volume. Currently builtin volumes are not recorded in podman volumes when they are created automatically; this patch fixes that.

Remove container volumes when requested. Currently the --volume option on podman rm does nothing; this implements the changes needed to remove the volumes if the user requests it. When removing a volume, make sure that no container uses the volume.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
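The last point in the commit message (a volume may only be removed once no container references it) boils down to a reference check before deletion. Below is a minimal, self-contained sketch of that check in Go; the types and names are illustrative stand-ins, not libpod's actual API.

package main

import "fmt"

// Container is a hypothetical stand-in for illustration, not libpod's real type.
type Container struct {
	ID      string
	Volumes []string // names of the volumes the container mounts
}

// volumeInUse reports whether any container still references volName.
func volumeInUse(containers []Container, volName string) bool {
	for _, ctr := range containers {
		for _, v := range ctr.Volumes {
			if v == volName {
				return true
			}
		}
	}
	return false
}

func main() {
	ctrs := []Container{{ID: "abc", Volumes: []string{"dbdata"}}}
	for _, name := range []string{"dbdata", "scratch"} {
		if volumeInUse(ctrs, name) {
			fmt.Printf("volume %q is still in use; refusing to remove\n", name)
		} else {
			fmt.Printf("volume %q is unused; safe to remove\n", name)
		}
	}
}

In the real runtime this check would of course be driven by the container configurations stored in libpod's state rather than an in-memory slice; the sketch only shows the shape of the decision.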
Diffstat (limited to 'cmd/podman/containers_prune.go')
-rw-r--r--  cmd/podman/containers_prune.go  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
index acc138fe0..bae578e1d 100644
--- a/cmd/podman/containers_prune.go
+++ b/cmd/podman/containers_prune.go
@@ -13,13 +13,12 @@ import (
)
var (
- pruneContainersCommand cliconfig.ContainersPrune
+ pruneContainersCommand cliconfig.PruneContainersValues
pruneContainersDescription = `
podman container prune
Removes all exited containers
`
-
_pruneContainersCommand = &cobra.Command{
Use: "prune",
Short: "Remove all stopped containers",
@@ -35,9 +34,11 @@ var (
func init() {
pruneContainersCommand.Command = _pruneContainersCommand
pruneContainersCommand.SetUsageTemplate(UsageTemplate())
+ flags := pruneContainersCommand.Flags()
+ flags.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
}
-func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
+func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force, volumes bool) error {
var deleteFuncs []shared.ParallelWorkerInput
filter := func(c *libpod.Container) bool {
@@ -57,7 +58,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
for _, container := range delContainers {
con := container
f := func() error {
- return runtime.RemoveContainer(ctx, con, force)
+ return runtime.RemoveContainer(ctx, con, force, volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
@@ -70,7 +71,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
return printParallelOutput(deleteErrors, errCount)
}
-func pruneContainersCmd(c *cliconfig.ContainersPrune) error {
+func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
@@ -83,5 +84,5 @@ func pruneContainersCmd(c *cliconfig.ContainersPrune) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"))
+ return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"), c.Bool("volumes"))
}
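For context, the renamed cliconfig type this diff switches to plausibly looks something like the sketch below. Only the pieces the diff actually exercises are shown: the embedded PodmanCommand (used via &c.PodmanCommand), the Force field bound by the new --force flag, and the Bool helper called in pruneContainersCmd. The package header and everything not visible in the hunks above are assumptions, not copied from the podman source.

package cliconfig // sketch only; not the real podman cliconfig package

import "github.com/spf13/cobra"

// PodmanCommand wraps the cobra command; global CLI state is elided here.
type PodmanCommand struct {
	*cobra.Command
}

// Bool reads a boolean flag off the wrapped cobra command (assumed helper).
func (p *PodmanCommand) Bool(name string) bool {
	val, _ := p.Flags().GetBool(name)
	return val
}

// PruneContainersValues carries the flag values for `podman container prune`.
type PruneContainersValues struct {
	PodmanCommand
	Force bool
}

Note that the init() hunk above only registers the --force flag; the value read via c.Bool("volumes") presumably comes from a flag registered elsewhere, or simply stays false until one is added.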