Diffstat (limited to 'cmd')

 cmd/podman/cliconfig/config.go |   2
 cmd/podman/commands.go         |   2
 cmd/podman/container.go        |   1
 cmd/podman/main.go             |   1
 cmd/podman/play_kube.go        |  85
 cmd/podman/pods_prune.go       |  64
 cmd/podman/pull.go             | 129
 cmd/podman/restart.go          |  84
 cmd/podman/system_prune.go     |  10
 9 files changed, 167 insertions(+), 211 deletions(-)
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 16c98a13e..640a4bff4 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -159,7 +159,7 @@ type PruneContainersValues struct {
Force bool
}
-type PrunePodsValues struct {
+type PodPruneValues struct {
PodmanCommand
Force bool
}
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index c36452cfe..7680d6df2 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -19,7 +19,6 @@ func getMainCommands() []*cobra.Command {
_mountCommand,
_portCommand,
_refreshCommand,
- _restartCommand,
_searchCommand,
_statsCommand,
_topCommand,
@@ -50,7 +49,6 @@ func getContainerSubCommands() []*cobra.Command {
_portCommand,
_pruneContainersCommand,
_refreshCommand,
- _restartCommand,
_restoreCommand,
_runlabelCommand,
_statsCommand,
diff --git a/cmd/podman/container.go b/cmd/podman/container.go
index 7733c8eef..28e0f0e4a 100644
--- a/cmd/podman/container.go
+++ b/cmd/podman/container.go
@@ -60,6 +60,7 @@ var (
_listSubCommand,
_logsCommand,
_pauseCommand,
+ _restartCommand,
_runCommand,
_rmCommand,
_startCommand,
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 15f4a5d71..392dfe542 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -50,6 +50,7 @@ var mainCommands = []*cobra.Command{
&_psCommand,
_pullCommand,
_pushCommand,
+ _restartCommand,
_rmCommand,
&_rmiCommand,
_runCommand,
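
The three registration hunks above (commands.go, container.go, main.go) move _restartCommand out of the local-only command list and register it unconditionally, both at the top level and under `podman container`. A minimal runnable sketch of that double registration with cobra; the command body here is a stand-in, not podman's:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// restartCmd stands in for podman's _restartCommand.
var restartCmd = &cobra.Command{
	Use:   "restart CONTAINER [CONTAINER...]",
	Short: "Restart one or more containers",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("restarting:", args)
		return nil
	},
}

func main() {
	root := &cobra.Command{Use: "podman"}
	containerCmd := &cobra.Command{Use: "container"}

	// Register the same command object at the top level and as a
	// `container` subcommand, mirroring mainCommands and the
	// container subcommand list in the diff.
	root.AddCommand(containerCmd, restartCmd)
	containerCmd.AddCommand(restartCmd)

	root.SetArgs([]string{"container", "restart", "ctr1"})
	_ = root.Execute()
}
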
diff --git a/cmd/podman/play_kube.go b/cmd/podman/play_kube.go
index d60c873f8..d1008e615 100644
--- a/cmd/podman/play_kube.go
+++ b/cmd/podman/play_kube.go
@@ -45,7 +45,7 @@ var (
playKubeCommand.InputArgs = args
playKubeCommand.GlobalFlags = MainGlobalOpts
playKubeCommand.Remote = remoteclient
- return playKubeYAMLCmd(&playKubeCommand)
+ return playKubeCmd(&playKubeCommand)
},
Example: `podman play kube demo.yml
podman play kube --cert-dir /mycertsdir --tls-verify=true --quiet myWebPod`,
@@ -65,16 +65,7 @@ func init() {
flags.BoolVar(&playKubeCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
}
-func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
- var (
- podOptions []libpod.PodCreateOption
- podYAML v1.Pod
- registryCreds *types.DockerAuthConfig
- containers []*libpod.Container
- writer io.Writer
- )
-
- ctx := getContext()
+func playKubeCmd(c *cliconfig.KubePlayValues) error {
args := c.InputArgs
if len(args) > 1 {
return errors.New("you can only play one kubernetes file at a time")
@@ -83,19 +74,39 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
return errors.New("you must supply at least one file")
}
+ ctx := getContext()
runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- content, err := ioutil.ReadFile(args[0])
+ pod, err := playKubeYAMLCmd(c, ctx, runtime, args[0])
+ if err != nil && pod != nil {
+ if err2 := runtime.RemovePod(ctx, pod, true, true); err2 != nil {
+ logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
+ }
+ }
+ return err
+}
+
+func playKubeYAMLCmd(c *cliconfig.KubePlayValues, ctx context.Context, runtime *libpod.Runtime, yamlFile string) (*libpod.Pod, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ podYAML v1.Pod
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ )
+
+ content, err := ioutil.ReadFile(yamlFile)
if err != nil {
- return err
+ return nil, err
}
if err := yaml.Unmarshal(content, &podYAML); err != nil {
- return errors.Wrapf(err, "unable to read %s as YAML", args[0])
+ return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
}
// check for name collision between pod and container
@@ -113,23 +124,21 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
if err != nil {
- return err
+ return nil, err
}
podOptions = append(podOptions, nsOptions...)
podPorts := getPodPorts(podYAML.Spec.Containers)
podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
// Create the Pod
- pod, err := runtime.NewPod(ctx, podOptions...)
+ pod, err = runtime.NewPod(ctx, podOptions...)
if err != nil {
- return err
+ return pod, err
}
- // Print the Pod's ID
- fmt.Println(pod.ID())
podInfraID, err := pod.InfraContainerID()
if err != nil {
- return err
+ return pod, err
}
namespaces := map[string]string{
@@ -157,26 +166,26 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
for _, volume := range podYAML.Spec.Volumes {
hostPath := volume.VolumeSource.HostPath
if hostPath == nil {
- return errors.Errorf("HostPath is currently the only supported VolumeSource")
+ return pod, errors.Errorf("HostPath is currently the only supported VolumeSource")
}
if hostPath.Type != nil {
switch *hostPath.Type {
case v1.HostPathDirectoryOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
- return errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
- return errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathFileOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
if err != nil {
- return errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
if err := f.Close(); err != nil {
logrus.Warnf("Error in closing newly created HostPath file: %v", err)
@@ -184,7 +193,7 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
- return errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathDirectory:
@@ -193,11 +202,11 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
// do nothing here because we will verify the path exists in validateVolumeHostDir
break
default:
- return errors.Errorf("Directories are the only supported HostPath type")
+ return pod, errors.Errorf("Directories are the only supported HostPath type")
}
}
if err := shared.ValidateVolumeHostDir(hostPath.Path); err != nil {
- return errors.Wrapf(err, "Error in parsing HostPath in YAML")
+ return pod, errors.Wrapf(err, "Error in parsing HostPath in YAML")
}
volumes[volume.Name] = hostPath.Path
}
@@ -205,15 +214,15 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
for _, container := range podYAML.Spec.Containers {
newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
if err != nil {
- return err
+ return pod, err
}
createConfig, err := kubeContainerToCreateConfig(ctx, container, runtime, newImage, namespaces, volumes)
if err != nil {
- return err
+ return pod, err
}
ctr, err := shared.CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod)
if err != nil {
- return err
+ return pod, err
}
containers = append(containers, ctr)
}
@@ -223,12 +232,24 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
if err := ctr.Start(ctx, true); err != nil {
// Making this a hard failure here to avoid a mess
// the other containers are in created status
- return err
+ return pod, err
}
+ }
+
+ // We've now successfully converted this YAML into a pod
+ // print our pod and containers, signifying we succeeded
+ fmt.Printf("Pod:\n%s\n", pod.ID())
+ if len(containers) == 1 {
+ fmt.Printf("Container:\n")
+ }
+ if len(containers) > 1 {
+ fmt.Printf("Containers:\n")
+ }
+ for _, ctr := range containers {
fmt.Println(ctr.ID())
}
- return nil
+ return pod, nil
}
// getPodPorts converts a slice of kube container descriptions to an
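
The reorganization above splits the old monolithic function into a thin playKubeCmd wrapper and a playKubeYAMLCmd worker that returns whatever pod it managed to create, even on failure, so the wrapper can tear the pod down instead of leaking a half-built one. A minimal sketch of that cleanup-on-error pattern, with stand-in types in place of *libpod.Pod and the libpod runtime:

package main

import (
	"context"
	"errors"
	"fmt"
)

// pod stands in for *libpod.Pod.
type pod struct{ id string }

func (p *pod) ID() string { return p.id }

// buildPod plays the role of playKubeYAMLCmd: on failure it still returns
// the partially created pod so the caller can remove it.
func buildPod(ctx context.Context) (*pod, error) {
	p := &pod{id: "0123abcd"}
	// ... pod created; container creation fails part-way through ...
	return p, errors.New("container creation failed")
}

// removePod plays the role of runtime.RemovePod.
func removePod(ctx context.Context, p *pod) error {
	fmt.Printf("removed pod %s\n", p.ID())
	return nil
}

func main() {
	ctx := context.Background()
	p, err := buildPod(ctx)
	if err != nil && p != nil {
		// Best-effort teardown; the original error is still what gets reported.
		if err2 := removePod(ctx, p); err2 != nil {
			fmt.Printf("unable to remove pod %s after failing to play kube\n", p.ID())
		}
	}
	if err != nil {
		fmt.Println("error:", err)
	}
}
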
diff --git a/cmd/podman/pods_prune.go b/cmd/podman/pods_prune.go
index 89401a98a..e6946f068 100644
--- a/cmd/podman/pods_prune.go
+++ b/cmd/podman/pods_prune.go
@@ -1,19 +1,15 @@
package main
import (
- "context"
-
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
- prunePodsCommand cliconfig.PrunePodsValues
- prunePodsDescription = `
+ podPruneCommand cliconfig.PodPruneValues
+ podPruneDescription = `
podman pod prune
Removes all exited pods
@@ -22,62 +18,30 @@ var (
Use: "prune",
Args: noSubArgs,
Short: "Remove all stopped pods",
- Long: prunePodsDescription,
+ Long: podPruneDescription,
RunE: func(cmd *cobra.Command, args []string) error {
- prunePodsCommand.InputArgs = args
- prunePodsCommand.GlobalFlags = MainGlobalOpts
- return prunePodsCmd(&prunePodsCommand)
+ podPruneCommand.InputArgs = args
+ podPruneCommand.GlobalFlags = MainGlobalOpts
+ return podPruneCmd(&podPruneCommand)
},
}
)
func init() {
- prunePodsCommand.Command = _prunePodsCommand
- prunePodsCommand.SetHelpTemplate(HelpTemplate())
- prunePodsCommand.SetUsageTemplate(UsageTemplate())
- flags := prunePodsCommand.Flags()
- flags.BoolVarP(&prunePodsCommand.Force, "force", "f", false, "Force removal of a running pods. The default is false")
+ podPruneCommand.Command = _prunePodsCommand
+ podPruneCommand.SetHelpTemplate(HelpTemplate())
+ podPruneCommand.SetUsageTemplate(UsageTemplate())
+ flags := podPruneCommand.Flags()
+ flags.BoolVarP(&podPruneCommand.Force, "force", "f", false, "Force removal of running pods. The default is false")
}
-func prunePods(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
- var deleteFuncs []shared.ParallelWorkerInput
-
- states := []string{shared.PodStateStopped, shared.PodStateExited}
- delPods, err := runtime.GetPodsByStatus(states)
- if err != nil {
- return err
- }
- if len(delPods) < 1 {
- return nil
- }
- for _, pod := range delPods {
- p := pod
- f := func() error {
- return runtime.RemovePod(ctx, p, force, force)
- }
-
- deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
- ContainerID: p.ID(),
- ParallelFunc: f,
- })
- }
- // Run the parallel funcs
- deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
- return printParallelOutput(deleteErrors, errCount)
-}
-
-func prunePodsCmd(c *cliconfig.PrunePodsValues) error {
+func podPruneCmd(c *cliconfig.PodPruneValues) error {
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- maxWorkers := shared.Parallelize("rm")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalFlags.MaxWorks
- }
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
- return prunePods(runtime, getContext(), maxWorkers, c.Bool("force"))
+ ok, failures, err := runtime.PrunePods(getContext(), c)
+ if err != nil {
+ return err
+ }
+ return printCmdResults(ok, failures)
}
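
With the parallel-worker plumbing deleted, podPruneCmd now delegates to the adapter's PrunePods and reports through printCmdResults. Assuming that helper takes the usual (succeeded IDs, per-ID failures) shape — an assumption, since its definition is outside this diff — a self-contained sketch of the reporting pattern:

package main

import (
	"errors"
	"fmt"
)

// printResults assumes the shape of printCmdResults: print each pruned
// pod ID, then report per-pod failures and fold them into one error.
func printResults(ok []string, failures map[string]error) error {
	for _, id := range ok {
		fmt.Println(id)
	}
	for id, err := range failures {
		fmt.Printf("failed to prune pod %s: %v\n", id, err)
	}
	if len(failures) > 0 {
		return errors.New("some pods could not be pruned")
	}
	return nil
}

func main() {
	ok := []string{"f00dfeed", "deadbeef"}
	failures := map[string]error{
		"cafebabe": errors.New("pod is running; use --force to remove it"),
	}
	if err := printResults(ok, failures); err != nil {
		fmt.Println(err)
	}
}
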
diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go
index 04eb5bd46..521419e7a 100644
--- a/cmd/podman/pull.go
+++ b/cmd/podman/pull.go
@@ -46,7 +46,7 @@ func init() {
pullCommand.SetHelpTemplate(HelpTemplate())
pullCommand.SetUsageTemplate(UsageTemplate())
flags := pullCommand.Flags()
- flags.BoolVar(&pullCommand.AllTags, "all-tags", false, "All tagged images inthe repository will be pulled")
+ flags.BoolVar(&pullCommand.AllTags, "all-tags", false, "All tagged images in the repository will be pulled")
flags.StringVar(&pullCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&pullCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
flags.BoolVarP(&pullCommand.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
@@ -94,8 +94,9 @@ func pullCmd(c *cliconfig.PullValues) (retError error) {
return errors.Errorf("tag can't be used with --all-tags")
}
}
+
ctx := getContext()
- img := args[0]
+ imgArg := args[0]
var registryCreds *types.DockerAuthConfig
@@ -122,68 +123,86 @@ func pullCmd(c *cliconfig.PullValues) (retError error) {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
- // Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead
- if strings.HasPrefix(img, dockerarchive.Transport.Name()+":") {
- srcRef, err := alltransports.ParseImageName(img)
+ // Special-case for docker-archive which allows multiple tags.
+ if strings.HasPrefix(imgArg, dockerarchive.Transport.Name()+":") {
+ srcRef, err := alltransports.ParseImageName(imgArg)
if err != nil {
- return errors.Wrapf(err, "error parsing %q", img)
+ return errors.Wrapf(err, "error parsing %q", imgArg)
}
newImage, err := runtime.LoadFromArchiveReference(getContext(), srcRef, c.SignaturePolicy, writer)
if err != nil {
- return errors.Wrapf(err, "error pulling image from %q", img)
+ return errors.Wrapf(err, "error pulling image from %q", imgArg)
}
fmt.Println(newImage[0].ID())
- } else {
- authfile := getAuthFile(c.String("authfile"))
- spec := img
- systemContext := image.GetSystemContext("", authfile, false)
- srcRef, err := alltransports.ParseImageName(spec)
+
+ return nil
+ }
+
+ authfile := getAuthFile(c.String("authfile"))
+
+ // FIXME: the default pull consults the registries.conf's search registries
+ // while the all-tags pull does not. This behavior must be fixed in the
+ // future and span across c/buildah, c/image and c/libpod to avoid redundant
+ // and error prone code.
+ //
+ // See https://bugzilla.redhat.com/show_bug.cgi?id=1701922 for background
+ // information.
+ if !c.Bool("all-tags") {
+ newImage, err := runtime.New(getContext(), imgArg, c.SignaturePolicy, authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, true, nil)
if err != nil {
- dockerTransport := "docker://"
- logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, dockerTransport, err)
- spec = dockerTransport + spec
- srcRef2, err2 := alltransports.ParseImageName(spec)
- if err2 != nil {
- return errors.Wrapf(err2, "error parsing image name %q", img)
- }
- srcRef = srcRef2
- }
- var names []string
- if c.Bool("all-tags") {
- if srcRef.DockerReference() == nil {
- return errors.New("Non-docker transport is currently not supported")
- }
- tags, err := docker.GetRepositoryTags(ctx, systemContext, srcRef)
- if err != nil {
- return errors.Wrapf(err, "error getting repository tags")
- }
- for _, tag := range tags {
- name := spec + ":" + tag
- names = append(names, name)
- }
- } else {
- names = append(names, spec)
+ return errors.Wrapf(err, "error pulling image %q", imgArg)
}
- var foundIDs []string
- foundImage := true
- for _, name := range names {
- newImage, err := runtime.New(getContext(), name, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, true, nil)
- if err != nil {
- logrus.Errorf("error pulling image %q", name)
- foundImage = false
- continue
- }
- foundIDs = append(foundIDs, newImage.ID())
- }
- if len(names) == 1 && !foundImage {
- return errors.Wrapf(err, "error pulling image %q", img)
- }
- if len(names) > 1 {
- fmt.Println("Pulled Images:")
+ fmt.Println(newImage.ID())
+ return nil
+ }
+
+ // FIXME: all-tags should use the libpod backend instead of baking its own bread.
+ spec := imgArg
+ systemContext := image.GetSystemContext("", authfile, false)
+ srcRef, err := alltransports.ParseImageName(spec)
+ if err != nil {
+ dockerTransport := "docker://"
+ logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, dockerTransport, err)
+ spec = dockerTransport + spec
+ srcRef2, err2 := alltransports.ParseImageName(spec)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error parsing image name %q", imgArg)
}
- for _, id := range foundIDs {
- fmt.Println(id)
+ srcRef = srcRef2
+ }
+ var names []string
+ if srcRef.DockerReference() == nil {
+ return errors.New("Non-docker transport is currently not supported")
+ }
+ tags, err := docker.GetRepositoryTags(ctx, systemContext, srcRef)
+ if err != nil {
+ return errors.Wrapf(err, "error getting repository tags")
+ }
+ for _, tag := range tags {
+ name := spec + ":" + tag
+ names = append(names, name)
+ }
+
+ var foundIDs []string
+ foundImage := true
+ for _, name := range names {
+ newImage, err := runtime.New(getContext(), name, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, true, nil)
+ if err != nil {
+ logrus.Errorf("error pulling image %q", name)
+ foundImage = false
+ continue
}
- } // end else if strings.HasPrefix(img, dockerarchive.Transport.Name()+":")
+ foundIDs = append(foundIDs, newImage.ID())
+ }
+ if len(names) == 1 && !foundImage {
+ return errors.Wrapf(err, "error pulling image %q", imgArg)
+ }
+ if len(names) > 1 {
+ fmt.Println("Pulled Images:")
+ }
+ for _, id := range foundIDs {
+ fmt.Println(id)
+ }
+
return nil
}
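
The rewritten pull flow now splits cleanly: docker-archive first, then the default single-image pull, and finally the --all-tags path, which parses the name, retries with the docker:// transport, enumerates tags with docker.GetRepositoryTags, and pulls each one. A runnable sketch of the tag-enumeration half; it uses the current containers/image/v5 import path, whereas the tree at this commit vendored containers/image without the version suffix:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/transports/alltransports"
)

// allTagNames mirrors the --all-tags path above: parse the image name,
// retry with the docker:// transport if the bare name does not parse,
// then ask the registry for every tag in the repository.
func allTagNames(ctx context.Context, spec string) ([]string, error) {
	srcRef, err := alltransports.ParseImageName(spec)
	if err != nil {
		spec = "docker://" + spec
		if srcRef, err = alltransports.ParseImageName(spec); err != nil {
			return nil, fmt.Errorf("error parsing image name %q: %v", spec, err)
		}
	}
	if srcRef.DockerReference() == nil {
		return nil, errors.New("non-docker transport is currently not supported")
	}
	tags, err := docker.GetRepositoryTags(ctx, nil, srcRef) // nil SystemContext: defaults
	if err != nil {
		return nil, fmt.Errorf("error getting repository tags: %v", err)
	}
	names := make([]string, 0, len(tags))
	for _, tag := range tags {
		names = append(names, spec+":"+tag) // same name construction as the diff
	}
	return names, nil
}

func main() {
	names, err := allTagNames(context.Background(), "quay.io/libpod/alpine")
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
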
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 5a9f3043a..9ab2dd528 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -2,11 +2,9 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -22,7 +20,6 @@ var (
RunE: func(cmd *cobra.Command, args []string) error {
restartCommand.InputArgs = args
restartCommand.GlobalFlags = MainGlobalOpts
- restartCommand.Remote = remoteclient
return restartCmd(&restartCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
@@ -49,83 +46,30 @@ func init() {
}
func restartCmd(c *cliconfig.RestartValues) error {
- var (
- restartFuncs []shared.ParallelWorkerInput
- containers []*libpod.Container
- restartContainers []*libpod.Container
- )
-
- args := c.InputArgs
- runOnly := c.Running
all := c.All
- if len(args) < 1 && !c.Latest && !all {
+ if len(c.InputArgs) < 1 && !c.Latest && !all {
return errors.Wrapf(libpod.ErrInvalidArg, "you must provide at least one container name or ID")
}
- runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
- timeout := c.Timeout
- useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
-
- // Handle --latest
- if c.Latest {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get latest container")
- }
- restartContainers = append(restartContainers, lastCtr)
- } else if runOnly {
- containers, err = getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
- if err != nil {
- return err
- }
- restartContainers = append(restartContainers, containers...)
- } else if all {
- containers, err = runtime.GetAllContainers()
- if err != nil {
- return err
- }
- restartContainers = append(restartContainers, containers...)
- } else {
- for _, id := range args {
- ctr, err := runtime.LookupContainer(id)
- if err != nil {
- return err
+ ok, failures, err := runtime.Restart(getContext(), c)
+ if err != nil {
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ if len(c.InputArgs) > 1 {
+ exitCode = 125
+ } else {
+ exitCode = 1
}
- restartContainers = append(restartContainers, ctr)
}
+ return err
}
-
- maxWorkers := shared.Parallelize("restart")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalFlags.MaxWorks
+ if len(failures) > 0 {
+ exitCode = 125
}
-
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
- // We now have a slice of all the containers to be restarted. Iterate them to
- // create restart Funcs with a timeout as needed
- for _, ctr := range restartContainers {
- con := ctr
- ctrTimeout := ctr.StopTimeout()
- if useTimeout {
- ctrTimeout = timeout
- }
-
- f := func() error {
- return con.RestartWithTimeout(getContext(), ctrTimeout)
- }
-
- restartFuncs = append(restartFuncs, shared.ParallelWorkerInput{
- ContainerID: con.ID(),
- ParallelFunc: f,
- })
- }
-
- restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
- return printParallelOutput(restartErrors, errCount)
+ return printCmdResults(ok, failures)
}
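
The new restartCmd hands the heavy lifting to the adapter runtime and only translates the outcome into podman's exit-code convention: a missing container exits 1 when exactly one name was given and 125 otherwise, and any partial failure forces 125. A self-contained sketch of that mapping; errNoSuchCtr stands in for libpod.ErrNoSuchCtr, and stdlib errors.Is replaces the errors.Cause comparison from github.com/pkg/errors:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchCtr stands in for libpod.ErrNoSuchCtr.
var errNoSuchCtr = errors.New("no such container")

// exitCodeFor mirrors the mapping above: a missing container exits 1 when
// exactly one name was given and 125 otherwise; partial failures exit 125.
func exitCodeFor(err error, nArgs, nFailures int) int {
	if errors.Is(err, errNoSuchCtr) {
		if nArgs > 1 {
			return 125
		}
		return 1
	}
	if err != nil || nFailures > 0 {
		return 125
	}
	return 0
}

func main() {
	fmt.Println(exitCodeFor(errNoSuchCtr, 1, 0)) // 1
	fmt.Println(exitCodeFor(errNoSuchCtr, 3, 0)) // 125
	fmt.Println(exitCodeFor(nil, 2, 1))          // 125
	fmt.Println(exitCodeFor(nil, 2, 0))          // 0
}
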
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index 14cb96941..8900e2644 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -82,13 +82,21 @@ Are you sure you want to continue? [y/N] `, volumeString)
ctx := getContext()
fmt.Println("Deleted Containers")
lasterr := pruneContainers(runtime, ctx, rmWorkers, false, false)
+
fmt.Println("Deleted Pods")
- if err := prunePods(runtime, ctx, rmWorkers, true); err != nil {
+ pruneValues := cliconfig.PodPruneValues{
+ PodmanCommand: c.PodmanCommand,
+ Force: c.Force,
+ }
+ ok, failures, err := runtime.PrunePods(ctx, &pruneValues)
+ if err != nil {
if lasterr != nil {
logrus.Errorf("%q", lasterr)
}
lasterr = err
}
+ printCmdResults(ok, failures)
+
if c.Bool("volumes") {
fmt.Println("Deleted Volumes")
err := volumePrune(runtime, getContext())