-rw-r--r--   README.md                            |   6
-rw-r--r--   cmd/podman/cliconfig/config.go       |   2
-rw-r--r--   cmd/podman/commands.go               |   2
-rw-r--r--   cmd/podman/container.go              |   1
-rw-r--r--   cmd/podman/main.go                   |   1
-rw-r--r--   cmd/podman/play_kube.go              |  85
-rw-r--r--   cmd/podman/pods_prune.go             |  64
-rw-r--r--   cmd/podman/pull.go                   | 129
-rw-r--r--   cmd/podman/restart.go                |  84
-rw-r--r--   cmd/podman/system_prune.go           |  10
-rw-r--r--   contrib/cirrus/lib.sh                |   1
-rwxr-xr-x   contrib/cirrus/setup_environment.sh  |  11
-rw-r--r--   docs/libpod.conf.5.md                |  10
-rw-r--r--   docs/podman-pull.1.md                |   2
-rw-r--r--   libpod.conf                          |   8
-rw-r--r--   libpod/testdata/config.toml          |  28
-rw-r--r--   pkg/adapter/containers.go            |  72
-rw-r--r--   pkg/adapter/containers_remote.go     |  79
-rw-r--r--   pkg/adapter/pods.go                  |  51
-rw-r--r--   pkg/adapter/pods_remote.go           |  44
-rw-r--r--   pkg/adapter/runtime.go               |  28
-rw-r--r--   pkg/adapter/runtime_remote.go        |  12
-rw-r--r--   test/e2e/pod_prune_test.go           |  78
-rw-r--r--   test/e2e/restart_test.go             |   2
-rw-r--r--   test/e2e/stop_test.go                |  78
25 files changed, 594 insertions, 294 deletions
diff --git a/README.md b/README.md
@@ -45,7 +45,11 @@ This project tests all builds against each supported version of Fedora, the late
 Podman can also generate Kubernetes YAML based on a container or Pod (see
 [podman-generate-kube](https://github.com/containers/libpod/blob/master/docs/podman-generate-kube.1.md)),
 which allows for an easy transition from a local development environment
-to a production Kubernetes cluster.
+to a production Kubernetes cluster. If Kubernetes does not fit your requirements,
+there are other third-party tools that support the docker-compose format such as
+[kompose](https://github.com/kubernetes/kompose/) and
+[podman-compose](https://github.com/muayyad-alsadi/podman-compose)
+that might be appropriate for your environment.
 
 ## OCI Projects Plans
 
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 16c98a13e..640a4bff4 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -159,7 +159,7 @@ type PruneContainersValues struct {
     Force bool
 }
 
-type PrunePodsValues struct {
+type PodPruneValues struct {
     PodmanCommand
     Force bool
 }
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index c36452cfe..7680d6df2 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -19,7 +19,6 @@ func getMainCommands() []*cobra.Command {
         _mountCommand,
         _portCommand,
         _refreshCommand,
-        _restartCommand,
         _searchCommand,
         _statsCommand,
         _topCommand,
@@ -50,7 +49,6 @@ func getContainerSubCommands() []*cobra.Command {
         _portCommand,
         _pruneContainersCommand,
         _refreshCommand,
-        _restartCommand,
         _restoreCommand,
         _runlabelCommand,
         _statsCommand,
diff --git a/cmd/podman/container.go b/cmd/podman/container.go
index 7733c8eef..28e0f0e4a 100644
--- a/cmd/podman/container.go
+++ b/cmd/podman/container.go
@@ -60,6 +60,7 @@ var (
         _listSubCommand,
         _logsCommand,
         _pauseCommand,
+        _restartCommand,
         _runCommand,
         _rmCommand,
         _startCommand,
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 15f4a5d71..392dfe542 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -50,6 +50,7 @@ var mainCommands = []*cobra.Command{
     &_psCommand,
     _pullCommand,
     _pushCommand,
+    _restartCommand,
     _rmCommand,
     &_rmiCommand,
     _runCommand,
diff --git a/cmd/podman/play_kube.go b/cmd/podman/play_kube.go
index d60c873f8..d1008e615 100644
--- a/cmd/podman/play_kube.go
+++ b/cmd/podman/play_kube.go
@@ -45,7 +45,7 @@ var (
             playKubeCommand.InputArgs = args
             playKubeCommand.GlobalFlags = MainGlobalOpts
             playKubeCommand.Remote = remoteclient
-            return playKubeYAMLCmd(&playKubeCommand)
+            return playKubeCmd(&playKubeCommand)
         },
         Example: `podman play kube demo.yml
   podman play kube --cert-dir /mycertsdir --tls-verify=true --quiet myWebPod`,
@@ -65,16 +65,7 @@ func init() {
     flags.BoolVar(&playKubeCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
 }
 
-func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
-    var (
-        podOptions []libpod.PodCreateOption
-        podYAML v1.Pod
-        registryCreds *types.DockerAuthConfig
-        containers []*libpod.Container
-        writer io.Writer
-    )
-
-    ctx := getContext()
+func playKubeCmd(c *cliconfig.KubePlayValues) error {
     args := c.InputArgs
     if len(args) > 1 {
         return errors.New("you can only play one kubernetes file at a time")
@@ -83,19 +74,39 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
         return errors.New("you must supply at least one file")
     }
 
+    ctx := getContext()
     runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
     if err != nil {
         return errors.Wrapf(err, "could not get runtime")
     }
     defer runtime.Shutdown(false)
 
-    content, err := ioutil.ReadFile(args[0])
+    pod, err := playKubeYAMLCmd(c, ctx, runtime, args[0])
+    if err != nil && pod != nil {
+        if err2 := runtime.RemovePod(ctx, pod, true, true); err2 != nil {
+            logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
+        }
+    }
+    return err
+}
+
+func playKubeYAMLCmd(c *cliconfig.KubePlayValues, ctx context.Context, runtime *libpod.Runtime, yamlFile string) (*libpod.Pod, error) {
+    var (
+        containers []*libpod.Container
+        pod *libpod.Pod
+        podOptions []libpod.PodCreateOption
+        podYAML v1.Pod
+        registryCreds *types.DockerAuthConfig
+        writer io.Writer
+    )
+
+    content, err := ioutil.ReadFile(yamlFile)
     if err != nil {
-        return err
+        return nil, err
     }
     if err := yaml.Unmarshal(content, &podYAML); err != nil {
-        return errors.Wrapf(err, "unable to read %s as YAML", args[0])
+        return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
     }
 
     // check for name collision between pod and container
@@ -113,23 +124,21 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
 
     nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
     if err != nil {
-        return err
+        return nil, err
     }
     podOptions = append(podOptions, nsOptions...)
     podPorts := getPodPorts(podYAML.Spec.Containers)
     podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
 
     // Create the Pod
-    pod, err := runtime.NewPod(ctx, podOptions...)
+    pod, err = runtime.NewPod(ctx, podOptions...)
     if err != nil {
-        return err
+        return pod, err
     }
-    // Print the Pod's ID
-    fmt.Println(pod.ID())
 
     podInfraID, err := pod.InfraContainerID()
     if err != nil {
-        return err
+        return pod, err
     }
 
     namespaces := map[string]string{
@@ -157,26 +166,26 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
     for _, volume := range podYAML.Spec.Volumes {
         hostPath := volume.VolumeSource.HostPath
         if hostPath == nil {
-            return errors.Errorf("HostPath is currently the only supported VolumeSource")
+            return pod, errors.Errorf("HostPath is currently the only supported VolumeSource")
         }
         if hostPath.Type != nil {
             switch *hostPath.Type {
             case v1.HostPathDirectoryOrCreate:
                 if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
                     if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
-                        return errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+                        return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
                     }
                 }
                 // unconditionally label a newly created volume as private
                 if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
-                    return errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+                    return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
                 }
                 break
             case v1.HostPathFileOrCreate:
                 if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
                     f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
                     if err != nil {
-                        return errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+                        return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
                     }
                     if err := f.Close(); err != nil {
                         logrus.Warnf("Error in closing newly created HostPath file: %v", err)
@@ -184,7 +193,7 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
                 }
                 // unconditionally label a newly created volume as private
                 if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
-                    return errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+                    return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
                 }
                 break
             case v1.HostPathDirectory:
@@ -193,11 +202,11 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
                 // do nothing here because we will verify the path exists in validateVolumeHostDir
                 break
             default:
-                return errors.Errorf("Directories are the only supported HostPath type")
+                return pod, errors.Errorf("Directories are the only supported HostPath type")
             }
         }
         if err := shared.ValidateVolumeHostDir(hostPath.Path); err != nil {
-            return errors.Wrapf(err, "Error in parsing HostPath in YAML")
+            return pod, errors.Wrapf(err, "Error in parsing HostPath in YAML")
         }
         volumes[volume.Name] = hostPath.Path
     }
@@ -205,15 +214,15 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
     for _, container := range podYAML.Spec.Containers {
         newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
         if err != nil {
-            return err
+            return pod, err
         }
         createConfig, err := kubeContainerToCreateConfig(ctx, container, runtime, newImage, namespaces, volumes)
         if err != nil {
-            return err
+            return pod, err
         }
         ctr, err := shared.CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod)
         if err != nil {
-            return err
+            return pod, err
         }
         containers = append(containers, ctr)
     }
@@ -223,12 +232,24 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
         if err := ctr.Start(ctx, true); err != nil {
             // Making this a hard failure here to avoid a mess
             // the other containers are in created status
-            return err
+            return pod, err
         }
+    }
+
+    // We've now successfully converted this YAML into a pod
+    // print our pod and containers, signifying we succeeded
+    fmt.Printf("Pod:\n%s\n", pod.ID())
+    if len(containers) == 1 {
+        fmt.Printf("Container:\n")
+    }
+    if len(containers) > 1 {
+        fmt.Printf("Containers:\n")
+    }
+    for _, ctr := range containers {
         fmt.Println(ctr.ID())
     }
 
-    return nil
+    return pod, nil
 }
 
 // getPodPorts converts a slice of kube container descriptions to an
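The play_kube refactor above splits the command into a thin playKubeCmd wrapper and a playKubeYAMLCmd worker that returns the partially created pod together with any error, so the wrapper can tear the pod down on failure. A minimal, self-contained sketch of that return-the-partial-resource contract (hypothetical names, not libpod's API):

```go
package main

import (
	"errors"
	"fmt"
)

// pod stands in for a partially created resource (hypothetical type).
type pod struct{ id string }

func (p *pod) remove() { fmt.Println("removed pod", p.id) }

// createPod returns the pod even on failure so the caller can clean it up,
// mirroring the contract of the new playKubeYAMLCmd.
func createPod() (*pod, error) {
	p := &pod{id: "abc123"}
	// Imagine a container inside the pod failed to start here.
	return p, errors.New("container failed to start")
}

func main() {
	p, err := createPod()
	if err != nil && p != nil {
		p.remove() // best-effort teardown; the original error is preserved
	}
	if err != nil {
		fmt.Println("play kube failed:", err)
	}
}
```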
label", hostPath.Path) } break case v1.HostPathDirectory: @@ -193,11 +202,11 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error { // do nothing here because we will verify the path exists in validateVolumeHostDir break default: - return errors.Errorf("Directories are the only supported HostPath type") + return pod, errors.Errorf("Directories are the only supported HostPath type") } } if err := shared.ValidateVolumeHostDir(hostPath.Path); err != nil { - return errors.Wrapf(err, "Error in parsing HostPath in YAML") + return pod, errors.Wrapf(err, "Error in parsing HostPath in YAML") } volumes[volume.Name] = hostPath.Path } @@ -205,15 +214,15 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error { for _, container := range podYAML.Spec.Containers { newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil) if err != nil { - return err + return pod, err } createConfig, err := kubeContainerToCreateConfig(ctx, container, runtime, newImage, namespaces, volumes) if err != nil { - return err + return pod, err } ctr, err := shared.CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod) if err != nil { - return err + return pod, err } containers = append(containers, ctr) } @@ -223,12 +232,24 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error { if err := ctr.Start(ctx, true); err != nil { // Making this a hard failure here to avoid a mess // the other containers are in created status - return err + return pod, err } + } + + // We've now successfully converted this YAML into a pod + // print our pod and containers, signifying we succeeded + fmt.Printf("Pod:\n%s\n", pod.ID()) + if len(containers) == 1 { + fmt.Printf("Container:\n") + } + if len(containers) > 1 { + fmt.Printf("Containers:\n") + } + for _, ctr := range containers { fmt.Println(ctr.ID()) } - return nil + return pod, nil } // getPodPorts converts a slice of kube container descriptions to an diff --git a/cmd/podman/pods_prune.go b/cmd/podman/pods_prune.go index 89401a98a..e6946f068 100644 --- a/cmd/podman/pods_prune.go +++ b/cmd/podman/pods_prune.go @@ -1,19 +1,15 @@ package main import ( - "context" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var ( - prunePodsCommand cliconfig.PrunePodsValues - prunePodsDescription = ` + podPruneCommand cliconfig.PodPruneValues + podPruneDescription = ` podman pod prune Removes all exited pods @@ -22,62 +18,30 @@ var ( Use: "prune", Args: noSubArgs, Short: "Remove all stopped pods", - Long: prunePodsDescription, + Long: podPruneDescription, RunE: func(cmd *cobra.Command, args []string) error { - prunePodsCommand.InputArgs = args - prunePodsCommand.GlobalFlags = MainGlobalOpts - return prunePodsCmd(&prunePodsCommand) + podPruneCommand.InputArgs = args + podPruneCommand.GlobalFlags = MainGlobalOpts + return podPruneCmd(&podPruneCommand) }, } ) func init() { - prunePodsCommand.Command = _prunePodsCommand - prunePodsCommand.SetHelpTemplate(HelpTemplate()) - prunePodsCommand.SetUsageTemplate(UsageTemplate()) - flags := prunePodsCommand.Flags() - flags.BoolVarP(&prunePodsCommand.Force, "force", "f", false, "Force removal of a running pods. 
The default is false") + podPruneCommand.Command = _prunePodsCommand + podPruneCommand.SetHelpTemplate(HelpTemplate()) + podPruneCommand.SetUsageTemplate(UsageTemplate()) + flags := podPruneCommand.Flags() + flags.BoolVarP(&podPruneCommand.Force, "force", "f", false, "Force removal of a running pods. The default is false") } -func prunePods(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error { - var deleteFuncs []shared.ParallelWorkerInput - - states := []string{shared.PodStateStopped, shared.PodStateExited} - delPods, err := runtime.GetPodsByStatus(states) - if err != nil { - return err - } - if len(delPods) < 1 { - return nil - } - for _, pod := range delPods { - p := pod - f := func() error { - return runtime.RemovePod(ctx, p, force, force) - } - - deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{ - ContainerID: p.ID(), - ParallelFunc: f, - }) - } - // Run the parallel funcs - deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs) - return printParallelOutput(deleteErrors, errCount) -} - -func prunePodsCmd(c *cliconfig.PrunePodsValues) error { +func podPruneCmd(c *cliconfig.PodPruneValues) error { runtime, err := adapter.GetRuntime(&c.PodmanCommand) if err != nil { return errors.Wrapf(err, "could not get runtime") } defer runtime.Shutdown(false) - maxWorkers := shared.Parallelize("rm") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum workers to %d", maxWorkers) - - return prunePods(runtime, getContext(), maxWorkers, c.Bool("force")) + ok, failures, err := runtime.PrunePods(getContext(), c) + return printCmdResults(ok, failures) } diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go index 04eb5bd46..521419e7a 100644 --- a/cmd/podman/pull.go +++ b/cmd/podman/pull.go @@ -46,7 +46,7 @@ func init() { pullCommand.SetHelpTemplate(HelpTemplate()) pullCommand.SetUsageTemplate(UsageTemplate()) flags := pullCommand.Flags() - flags.BoolVar(&pullCommand.AllTags, "all-tags", false, "All tagged images inthe repository will be pulled") + flags.BoolVar(&pullCommand.AllTags, "all-tags", false, "All tagged images in the repository will be pulled") flags.StringVar(&pullCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys") flags.StringVar(&pullCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry") flags.BoolVarP(&pullCommand.Quiet, "quiet", "q", false, "Suppress output information when pulling images") @@ -94,8 +94,9 @@ func pullCmd(c *cliconfig.PullValues) (retError error) { return errors.Errorf("tag can't be used with --all-tags") } } + ctx := getContext() - img := args[0] + imgArg := args[0] var registryCreds *types.DockerAuthConfig @@ -122,68 +123,86 @@ func pullCmd(c *cliconfig.PullValues) (retError error) { dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify) } - // Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead - if strings.HasPrefix(img, dockerarchive.Transport.Name()+":") { - srcRef, err := alltransports.ParseImageName(img) + // Special-case for docker-archive which allows multiple tags. 
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 5a9f3043a..9ab2dd528 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -2,11 +2,9 @@ package main
 
 import (
     "github.com/containers/libpod/cmd/podman/cliconfig"
-    "github.com/containers/libpod/cmd/podman/libpodruntime"
-    "github.com/containers/libpod/cmd/podman/shared"
     "github.com/containers/libpod/libpod"
+    "github.com/containers/libpod/pkg/adapter"
     "github.com/pkg/errors"
-    "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )
 
@@ -22,7 +20,6 @@ var (
         RunE: func(cmd *cobra.Command, args []string) error {
             restartCommand.InputArgs = args
             restartCommand.GlobalFlags = MainGlobalOpts
-            restartCommand.Remote = remoteclient
             return restartCmd(&restartCommand)
         },
         Args: func(cmd *cobra.Command, args []string) error {
@@ -49,83 +46,30 @@ func init() {
 }
 
 func restartCmd(c *cliconfig.RestartValues) error {
-    var (
-        restartFuncs      []shared.ParallelWorkerInput
-        containers        []*libpod.Container
-        restartContainers []*libpod.Container
-    )
-
-    args := c.InputArgs
-    runOnly := c.Running
     all := c.All
-    if len(args) < 1 && !c.Latest && !all {
+    if len(c.InputArgs) < 1 && !c.Latest && !all {
         return errors.Wrapf(libpod.ErrInvalidArg, "you must provide at least one container name or ID")
     }
 
-    runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+    runtime, err := adapter.GetRuntime(&c.PodmanCommand)
     if err != nil {
         return errors.Wrapf(err, "error creating libpod runtime")
     }
     defer runtime.Shutdown(false)
 
-    timeout := c.Timeout
-    useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
-
-    // Handle --latest
-    if c.Latest {
-        lastCtr, err := runtime.GetLatestContainer()
-        if err != nil {
-            return errors.Wrapf(err, "unable to get latest container")
-        }
-        restartContainers = append(restartContainers, lastCtr)
-    } else if runOnly {
-        containers, err = getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
-        if err != nil {
-            return err
-        }
-        restartContainers = append(restartContainers, containers...)
-    } else if all {
-        containers, err = runtime.GetAllContainers()
-        if err != nil {
-            return err
-        }
-        restartContainers = append(restartContainers, containers...)
-    } else {
-        for _, id := range args {
-            ctr, err := runtime.LookupContainer(id)
-            if err != nil {
-                return err
+    ok, failures, err := runtime.Restart(getContext(), c)
+    if err != nil {
+        if errors.Cause(err) == libpod.ErrNoSuchCtr {
+            if len(c.InputArgs) > 1 {
+                exitCode = 125
+            } else {
+                exitCode = 1
             }
-            restartContainers = append(restartContainers, ctr)
         }
+        return err
     }
-
-    maxWorkers := shared.Parallelize("restart")
-    if c.GlobalIsSet("max-workers") {
-        maxWorkers = c.GlobalFlags.MaxWorks
+    if len(failures) > 0 {
+        exitCode = 125
     }
-
-    logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
-    // We now have a slice of all the containers to be restarted. Iterate them to
-    // create restart Funcs with a timeout as needed
-    for _, ctr := range restartContainers {
-        con := ctr
-        ctrTimeout := ctr.StopTimeout()
-        if useTimeout {
-            ctrTimeout = timeout
-        }
-
-        f := func() error {
-            return con.RestartWithTimeout(getContext(), ctrTimeout)
-        }
-
-        restartFuncs = append(restartFuncs, shared.ParallelWorkerInput{
-            ContainerID:  con.ID(),
-            ParallelFunc: f,
-        })
-    }
-
-    restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
-    return printParallelOutput(restartErrors, errCount)
+    return printCmdResults(ok, failures)
 }
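restartCmd now maps outcomes onto podman's exit-code convention: 1 when the single named container does not exist, 125 for multi-target lookups and partial failures. A compact sketch of that policy as a pure function (the real code mutates cmd/podman's package-level exitCode variable instead):

```go
package main

import "fmt"

// exitCodeFor condenses the decision in restartCmd: 1 when the one
// requested container is unknown, 125 when several targets were named or
// some restarts failed, 0 on full success.
func exitCodeFor(requested int, missingCtr bool, failures int) int {
	if missingCtr {
		if requested > 1 {
			return 125
		}
		return 1
	}
	if failures > 0 {
		return 125
	}
	return 0
}

func main() {
	fmt.Println(exitCodeFor(1, true, 0))  // 1
	fmt.Println(exitCodeFor(3, false, 2)) // 125
}
```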
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index 14cb96941..8900e2644 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -82,13 +82,21 @@ Are you sure you want to continue? [y/N] `, volumeString)
     ctx := getContext()
     fmt.Println("Deleted Containers")
     lasterr := pruneContainers(runtime, ctx, rmWorkers, false, false)
+
     fmt.Println("Deleted Pods")
-    if err := prunePods(runtime, ctx, rmWorkers, true); err != nil {
+    pruneValues := cliconfig.PodPruneValues{
+        PodmanCommand: c.PodmanCommand,
+        Force:         c.Force,
+    }
+    ok, failures, err := runtime.PrunePods(ctx, &pruneValues)
+    if err != nil {
         if lasterr != nil {
             logrus.Errorf("%q", lasterr)
         }
         lasterr = err
     }
+    printCmdResults(ok, failures)
+
     if c.Bool("volumes") {
         fmt.Println("Deleted Volumes")
         err := volumePrune(runtime, getContext())
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 6c45b2c5d..d663616b2 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -178,7 +178,6 @@ setup_rootless() {
     make install.catatonit
     go get github.com/onsi/ginkgo/ginkgo
     go get github.com/onsi/gomega/...
-    dnf -y update runc
 
     # Guarantee independence from specific values
     ROOTLESS_UID=$[RANDOM+1000]
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 55706954e..3818abbc7 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -58,11 +58,14 @@ then
             # Always install runc on Ubuntu
             install_runc_from_git
             ;;
-        fedora-29) ;&  # Continue to the next item
+        fedora-29)
+            CON_SEL="https://kojipkgs.fedoraproject.org/packages/container-selinux/2.100/1.git3b78187.fc29/noarch/container-selinux-2.100-1.git3b78187.fc29.noarch.rpm"
+            echo ">>>>> OVERRIDING container-selinux WITH $CON_SEL <<<<<"
+            dnf -y install $CON_SEL
+            ;&  # Continue to the next item
         fedora-28)
-            RUNC="https://kojipkgs.fedoraproject.org/packages/runc/1.0.0/55.dev.git578fe65.fc${OS_RELEASE_VER}/x86_64/runc-1.0.0-55.dev.git578fe65.fc${OS_RELEASE_VER}.x86_64.rpm"
-            echo ">>>>> OVERRIDING RUNC WITH $RUNC <<<<<"
-            dnf -y install "$RUNC"
+            echo ">>>>> OVERRIDING source-built runc with latest package <<<<<"
+            dnf update -y runc
             ;&  # Continue to the next item
         centos-7) ;&
         rhel-7)
diff --git a/docs/libpod.conf.5.md b/docs/libpod.conf.5.md
index 777edeacb..4abbcd8b0 100644
--- a/docs/libpod.conf.5.md
+++ b/docs/libpod.conf.5.md
@@ -65,13 +65,13 @@ libpod to manage containers.
 **cni_plugin_dir**=""
   Directories where CNI plugin binaries may be located
 
-**pause_image** = ""
-  Pause container image name for pod pause containers. When running a pod, we
-  start a `pause` processes in a container to hold open the namespaces associated with the
+**infra_image** = ""
+  Infra (pause) container image name for pod infra containers. When running a pod, we
+  start a `pause` process in a container to hold open the namespaces associated with the
   pod. This container and process, basically sleep/pause for the lifetime of the pod.
 
-**pause_command**=""
-  Command to run the pause container
+**infra_command**=""
+  Command to run the infra container
 
 **namespace**=""
   Default libpod namespace. If libpod is joined to a namespace, it will see only containers and pods
diff --git a/docs/podman-pull.1.md b/docs/podman-pull.1.md
index 92740c3af..ab01bb40d 100644
--- a/docs/podman-pull.1.md
+++ b/docs/podman-pull.1.md
@@ -49,6 +49,8 @@ Image stored in local container/storage
 
 All tagged images in the repository will be pulled.
 
+Note: When using the all-tags flag, Podman will not iterate over the search registries in the containers-registries.conf(5) but will always use docker.io for unqualified image names.
+
 **--authfile**
 
 Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
diff --git a/libpod.conf b/libpod.conf
index 211ba106d..80422e3dd 100644
--- a/libpod.conf
+++ b/libpod.conf
@@ -71,11 +71,11 @@ cni_default_network = "podman"
 # namespace is set, all containers and pods are visible.
 #namespace = ""
 
-# Default pause image name for pod pause containers
-pause_image = "k8s.gcr.io/pause:3.1"
+# Default infra (pause) image name for pod infra containers
+infra_image = "k8s.gcr.io/pause:3.1"
 
-# Default command to run the pause container
-pause_command = "/pause"
+# Default command to run the infra container
+infra_command = "/pause"
 
 # Determines whether libpod will reserve ports on the host when they are
 # forwarded to containers. When enabled, when ports are forwarded to containers,
diff --git a/libpod/testdata/config.toml b/libpod/testdata/config.toml
deleted file mode 100644
index 1d78f2083..000000000
--- a/libpod/testdata/config.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[crio]
-  root = "/var/lib/containers/storage"
-  runroot = "/var/run/containers/storage"
-  storage_driver = "overlay2"
-  log_dir = "/var/log/crio/pods"
-  file_locking = true
-  [crio.runtime]
-    runtime = "/usr/bin/runc"
-    runtime_untrusted_workload = ""
-    default_workload_trust = "trusted"
-    conmon = "/usr/local/libexec/crio/conmon"
-    conmon_env = ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
-    selinux = true
-    seccomp_profile = "/etc/crio/seccomp.json"
-    apparmor_profile = "crio-default"
-    cgroup_manager = "cgroupfs"
-    hooks_dir = ["/usr/share/containers/oci/hooks.d"]
-    pids_limit = 2048
-    container_exits_dir = "/var/run/podman/exits"
-  [crio.image]
-    default_transport = "docker://"
-    pause_image = "kubernetes/pause"
-    pause_command = "/pause"
-    signature_policy = ""
-    image_volumes = "mkdir"
-  [crio.network]
-    network_dir = "/etc/cni/net.d/"
-    plugin_dir = "/opt/cni/bin/"
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 5279f11b2..8481a0cec 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -92,6 +92,9 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
             if errors.Cause(err) == libpod.ErrCtrStopped {
                 logrus.Debugf("Container %s is already stopped", c.ID())
                 return nil
+            } else if cli.All && errors.Cause(err) == libpod.ErrCtrStateInvalid {
+                logrus.Debugf("Container %s is not running, could not stop", c.ID())
+                return nil
             }
             logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error())
         }
@@ -694,3 +697,72 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
     }
     return pool.Run()
 }
+
+// Restart containers without or without a timeout
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+    var (
+        containers        []*libpod.Container
+        restartContainers []*libpod.Container
+        err               error
+    )
+    useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+    inputTimeout := c.Timeout
+
+    // Handle --latest
+    if c.Latest {
+        lastCtr, err := r.Runtime.GetLatestContainer()
+        if err != nil {
+            return nil, nil, errors.Wrapf(err, "unable to get latest container")
+        }
+        restartContainers = append(restartContainers, lastCtr)
+    } else if c.Running {
+        containers, err = r.GetRunningContainers()
+        if err != nil {
+            return nil, nil, err
+        }
+        restartContainers = append(restartContainers, containers...)
+    } else if c.All {
+        containers, err = r.Runtime.GetAllContainers()
+        if err != nil {
+            return nil, nil, err
+        }
+        restartContainers = append(restartContainers, containers...)
+    } else {
+        for _, id := range c.InputArgs {
+            ctr, err := r.Runtime.LookupContainer(id)
+            if err != nil {
+                return nil, nil, err
+            }
+            restartContainers = append(restartContainers, ctr)
+        }
+    }
+
+    maxWorkers := shared.DefaultPoolSize("restart")
+    if c.GlobalIsSet("max-workers") {
+        maxWorkers = c.GlobalFlags.MaxWorks
+    }
+
+    logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+    // We now have a slice of all the containers to be restarted. Iterate them to
+    // create restart Funcs with a timeout as needed
+    pool := shared.NewPool("restart", maxWorkers, len(restartContainers))
+    for _, c := range restartContainers {
+        ctr := c
+        timeout := ctr.StopTimeout()
+        if useTimeout {
+            timeout = inputTimeout
+        }
+        pool.Add(shared.Job{
+            ID: ctr.ID(),
+            Fn: func() error {
+                err := ctr.RestartWithTimeout(ctx, timeout)
+                if err != nil {
+                    logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error())
+                }
+                return err
+            },
+        })
+    }
+    return pool.Run()
+}
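The new LocalRuntime.Restart drives restarts through shared.NewPool and shared.Job, podman's bounded worker pool, which returns the same ([]string, map[string]error) pair used throughout this change. The pool itself lives in cmd/podman/shared and is not shown here; a hedged, stdlib-only sketch of the same bounded-parallelism pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// job mirrors the shape of shared.Job: an ID plus a function to run.
type job struct {
	id string
	fn func() error
}

// runPool executes jobs with at most maxWorkers running concurrently and
// returns the IDs that succeeded plus a map of per-ID failures.
func runPool(maxWorkers int, jobs []job) ([]string, map[string]error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		ok       []string
		failures = map[string]error{}
	)
	sem := make(chan struct{}, maxWorkers) // bounds concurrency
	for _, j := range jobs {
		j := j
		wg.Add(1)
		sem <- struct{}{}
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			if err := j.fn(); err != nil {
				mu.Lock()
				failures[j.id] = err
				mu.Unlock()
				return
			}
			mu.Lock()
			ok = append(ok, j.id)
			mu.Unlock()
		}()
	}
	wg.Wait()
	return ok, failures
}

func main() {
	jobs := []job{
		{"ctr1", func() error { return nil }},
		{"ctr2", func() error { return fmt.Errorf("timed out") }},
	}
	ok, failures := runPool(2, jobs)
	fmt.Println(ok, failures)
}
```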
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index cb61871bf..e8f221eaf 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,12 @@ func (c *Container) ID() string {
     return c.config.ID
 }
 
+// Restart a single container
+func (c *Container) Restart(timeout int64) error {
+    _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout)
+    return err
+}
+
 // Pause a container
 func (c *Container) Pause() error {
     _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
@@ -132,6 +138,23 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
     }, nil
 }
 
+// GetAllContainers returns all containers in a slice
+func (r *LocalRuntime) GetAllContainers() ([]*Container, error) {
+    var containers []*Container
+    ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{})
+    if err != nil {
+        return nil, err
+    }
+    for _, ctr := range ctrs {
+        container, err := r.LookupContainer(ctr)
+        if err != nil {
+            return nil, err
+        }
+        containers = append(containers, container)
+    }
+    return containers, nil
+}
+
 func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
     var containers []*Container
     ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
@@ -753,3 +776,59 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
     }
     return ok, failures, nil
 }
+
+// Restart restarts a container over varlink
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+    var (
+        containers        []*Container
+        restartContainers []*Container
+        err               error
+        ok                = []string{}
+        failures          = map[string]error{}
+    )
+    useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+    inputTimeout := c.Timeout
+
+    if c.Latest {
+        lastCtr, err := r.GetLatestContainer()
+        if err != nil {
+            return nil, nil, errors.Wrapf(err, "unable to get latest container")
+        }
+        restartContainers = append(restartContainers, lastCtr)
+    } else if c.Running {
+        containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+        if err != nil {
+            return nil, nil, err
+        }
+        restartContainers = append(restartContainers, containers...)
+    } else if c.All {
+        containers, err = r.GetAllContainers()
+        if err != nil {
+            return nil, nil, err
+        }
+        restartContainers = append(restartContainers, containers...)
+    } else {
+        for _, id := range c.InputArgs {
+            ctr, err := r.LookupContainer(id)
+            if err != nil {
+                return nil, nil, err
+            }
+            restartContainers = append(restartContainers, ctr)
+        }
+    }
+
+    for _, c := range restartContainers {
+        c := c
+        timeout := c.config.StopTimeout
+        if useTimeout {
+            timeout = inputTimeout
+        }
+        err := c.Restart(int64(timeout))
+        if err != nil {
+            failures[c.ID()] = err
+        } else {
+            ok = append(ok, c.ID())
+        }
+    }
+    return ok, failures, nil
+}
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index 901c1857b..bb7d9cce6 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -4,20 +4,16 @@ package adapter
 
 import (
     "context"
-    "github.com/pkg/errors"
     "strings"
 
     "github.com/containers/libpod/cmd/podman/cliconfig"
     "github.com/containers/libpod/cmd/podman/shared"
     "github.com/containers/libpod/libpod"
     "github.com/containers/libpod/pkg/adapter/shortcuts"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
 )
 
-// Pod ...
-type Pod struct {
-    *libpod.Pod
-}
-
 // PodContainerStats is struct containing an adapter Pod and a libpod
 // ContainerStats and is used primarily for outputing pod stats.
 type PodContainerStats struct {
@@ -25,6 +21,49 @@ type PodContainerStats struct {
     ContainerStats map[string]*libpod.ContainerStats
 }
 
+// PrunePods removes pods
+func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) {
+    var (
+        ok       = []string{}
+        failures = map[string]error{}
+    )
+
+    maxWorkers := shared.DefaultPoolSize("rm")
+    if cli.GlobalIsSet("max-workers") {
+        maxWorkers = cli.GlobalFlags.MaxWorks
+    }
+    logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+    states := []string{shared.PodStateStopped, shared.PodStateExited}
+    if cli.Force {
+        states = append(states, shared.PodStateRunning)
+    }
+
+    pods, err := r.GetPodsByStatus(states)
+    if err != nil {
+        return ok, failures, err
+    }
+    if len(pods) < 1 {
+        return ok, failures, nil
+    }
+
+    pool := shared.NewPool("pod_prune", maxWorkers, len(pods))
+    for _, p := range pods {
+        p := p
+
+        pool.Add(shared.Job{p.ID(),
+            func() error {
+                err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force)
+                if err != nil {
+                    logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error())
+                }
+                return err
+            },
+        })
+    }
+    return pool.Run()
+}
+
 // RemovePods ...
 func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
     var (
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index 00a5d9a32..7cf38aac0 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -14,13 +14,9 @@ import (
     "github.com/containers/libpod/libpod"
     "github.com/containers/libpod/pkg/varlinkapi"
     "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
 )
 
-// Pod ...
-type Pod struct {
-    remotepod
-}
-
 // PodContainerStats is struct containing an adapter Pod and a libpod
 // ContainerStats and is used primarily for outputing pod stats.
 type PodContainerStats struct {
@@ -28,13 +24,6 @@ type PodContainerStats struct {
     ContainerStats map[string]*libpod.ContainerStats
 }
 
-type remotepod struct {
-    config     *libpod.PodConfig
-    state      *libpod.PodInspectState
-    containers []libpod.PodContainerInfo
-    Runtime    *LocalRuntime
-}
-
 // RemovePods removes one or more based on the cli context.
 func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
     var (
@@ -539,3 +528,34 @@ func (r *LocalRuntime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force
     }
     return nil
 }
+
+// PrunePods...
+func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) {
+    var (
+        ok       = []string{}
+        failures = map[string]error{}
+    )
+    states := []string{shared.PodStateStopped, shared.PodStateExited}
+    if cli.Force {
+        states = append(states, shared.PodStateRunning)
+    }
+
+    ids, err := iopodman.GetPodsByStatus().Call(r.Conn, states)
+    if err != nil {
+        return ok, failures, err
+    }
+    if len(ids) < 1 {
+        return ok, failures, nil
+    }
+
+    for _, id := range ids {
+        _, err := iopodman.RemovePod().Call(r.Conn, id, cli.Force)
+        if err != nil {
+            logrus.Debugf("Failed to remove pod %s: %s", id, err.Error())
+            failures[id] = err
+        } else {
+            ok = append(ok, id)
+        }
+    }
+    return ok, failures, nil
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 6ed9cee77..753f7c944 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -7,7 +7,6 @@ import (
     "context"
     "io"
     "io/ioutil"
-    "k8s.io/api/core/v1"
     "os"
     "text/template"
 
@@ -25,6 +24,7 @@ import (
     "github.com/containers/libpod/pkg/rootless"
     "github.com/containers/storage/pkg/archive"
     "github.com/pkg/errors"
+    "k8s.io/api/core/v1"
 )
 
 // LocalRuntime describes a typical libpod runtime
@@ -43,6 +43,11 @@ type Container struct {
     *libpod.Container
 }
 
+// Pod encapsulates the libpod.Pod structure, helps with remote vs. local
+type Pod struct {
+    *libpod.Pod
+}
+
 // Volume ...
 type Volume struct {
     *libpod.Volume
@@ -371,8 +376,7 @@ func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *
 }
 
 // GetPodsByStatus returns a slice of pods filtered by a libpod status
-func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) {
-    var adapterPods []*Pod
+func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) {
 
     filterFunc := func(p *libpod.Pod) bool {
         state, _ := shared.GetPodStatus(p)
@@ -383,25 +387,11 @@ func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) {
         }
         return false
     }
+
     pods, err := r.Runtime.Pods(filterFunc)
     if err != nil {
         return nil, err
     }
-    for _, p := range pods {
-        adapterPod := Pod{
-            p,
-        }
-        adapterPods = append(adapterPods, &adapterPod)
-    }
-    return adapterPods, nil
-}
 
-// RemovePod removes a pod
-// If removeCtrs is specified, containers will be removed
-// Otherwise, a pod that is not empty will return an error and not be removed
-// If force is specified with removeCtrs, all containers will be stopped before
-// being removed
-// Otherwise, the pod will not be removed if any containers are running
-func (r *LocalRuntime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
-    return r.Runtime.RemovePod(ctx, p.Pod, removeCtrs, force)
+    return pods, nil
 }
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 71f7380db..dcb0924ce 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -99,6 +99,18 @@ type remoteContainer struct {
     state *libpod.ContainerState
 }
 
+// Pod ...
+type Pod struct {
+    remotepod
+}
+
+type remotepod struct {
+    config     *libpod.PodConfig
+    state      *libpod.PodInspectState
+    containers []libpod.PodContainerInfo
+    Runtime    *LocalRuntime
+}
+
 type VolumeFilter func(*Volume) bool
 
 // Volume is embed for libpod volumes
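This change moves the adapter Pod type out of the shared pods files: runtime.go (compiled without the remoteclient tag) wraps *libpod.Pod directly, while runtime_remote.go carries the varlink-backed remotepod. A hedged sketch of the build-tag pattern that lets two files define the same exported type name for the two client flavors (file names and the remote field are illustrative, not the repository's exact layout):

```go
// pod_local.go
// +build !remoteclient

package adapter

import "github.com/containers/libpod/libpod"

// Pod wraps the real libpod pod when the client links libpod directly.
type Pod struct {
	*libpod.Pod
}
```

```go
// pod_remote.go
// +build remoteclient

package adapter

// Pod is backed by state fetched over varlink rather than a live
// *libpod.Pod; the field here is an illustrative stand-in.
type Pod struct {
	remotepod
}

type remotepod struct {
	id string // filled from the varlink inspect payload
}
```

Callers in package adapter can then use adapter.Pod uniformly; the build tag selects which definition is compiled in.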
"github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Podman pod prune", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + podmanTest.RestoreAllArtifacts() + }) + + AfterEach(func() { + podmanTest.CleanupPod() + f := CurrentGinkgoTestDescription() + processTestResult(f) + + }) + + It("podman pod prune empty pod", func() { + _, ec, _ := podmanTest.CreatePod("") + Expect(ec).To(Equal(0)) + + result := podmanTest.Podman([]string{"pod", "prune"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(0)) + }) + + It("podman pod prune doesn't remove a pod with a container", func() { + _, ec, podid := podmanTest.CreatePod("") + Expect(ec).To(Equal(0)) + + _, ec2, _ := podmanTest.RunLsContainerInPod("", podid) + Expect(ec2).To(Equal(0)) + + result := podmanTest.Podman([]string{"pod", "prune"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(125)) + + result = podmanTest.Podman([]string{"ps", "-qa"}) + result.WaitWithDefaultTimeout() + Expect(len(result.OutputToStringArray())).To(Equal(1)) + }) + + It("podman pod prune -f does remove a running container", func() { + _, ec, podid := podmanTest.CreatePod("") + Expect(ec).To(Equal(0)) + + session := podmanTest.RunTopContainerInPod("", podid) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + result := podmanTest.Podman([]string{"pod", "prune", "-f"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(0)) + + result = podmanTest.Podman([]string{"ps", "-q"}) + result.WaitWithDefaultTimeout() + Expect(result.OutputToString()).To(BeEmpty()) + }) +}) diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go index 1daf63a0e..7a9a466d8 100644 --- a/test/e2e/restart_test.go +++ b/test/e2e/restart_test.go @@ -1,5 +1,3 @@ -// +build !remoteclient - package integration import ( diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go index 97c9287b9..717eea441 100644 --- a/test/e2e/stop_test.go +++ b/test/e2e/stop_test.go @@ -4,6 +4,7 @@ package integration import ( "os" + "strings" . "github.com/containers/libpod/test/utils" . 
"github.com/onsi/ginkgo" @@ -48,6 +49,11 @@ var _ = Describe("Podman stop", func() { session = podmanTest.Podman([]string{"stop", cid}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop container by name", func() { @@ -57,15 +63,25 @@ var _ = Describe("Podman stop", func() { session = podmanTest.Podman([]string{"stop", "test1"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) - It("podman stop container by name", func() { + It("podman container stop by name", func() { session := podmanTest.RunTopContainer("test1") session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) session = podmanTest.Podman([]string{"container", "stop", "test1"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop stopped container", func() { @@ -80,6 +96,11 @@ var _ = Describe("Podman stop", func() { session3 := podmanTest.Podman([]string{"stop", "test1"}) session3.WaitWithDefaultTimeout() Expect(session3.ExitCode()).To(Equal(0)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop all containers -t", func() { @@ -105,6 +126,11 @@ var _ = Describe("Podman stop", func() { Expect(output).To(ContainSubstring(cid1)) Expect(output).To(ContainSubstring(cid2)) Expect(output).To(ContainSubstring(cid3)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop container --time", func() { @@ -118,6 +144,11 @@ var _ = Describe("Podman stop", func() { Expect(session.ExitCode()).To(Equal(0)) output := session.OutputToString() Expect(output).To(ContainSubstring(cid1)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop container --timeout", func() { @@ -131,6 +162,11 @@ var _ = Describe("Podman stop", func() { Expect(session.ExitCode()).To(Equal(0)) output := session.OutputToString() Expect(output).To(ContainSubstring(cid1)) + + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) It("podman stop latest containers", func() { @@ -140,5 +176,45 @@ var _ = Describe("Podman stop", func() { session = podmanTest.Podman([]string{"stop", "-l", "-t", "1"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + 
Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) + + It("podman stop all containers with one stopped", func() { + session := podmanTest.RunTopContainer("test1") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + session2 := podmanTest.RunTopContainer("test2") + session2.WaitWithDefaultTimeout() + Expect(session2.ExitCode()).To(Equal(0)) + session3 := podmanTest.Podman([]string{"stop", "-l", "-t", "1"}) + session3.WaitWithDefaultTimeout() + Expect(session3.ExitCode()).To(Equal(0)) + session4 := podmanTest.Podman([]string{"stop", "-a", "-t", "1"}) + session4.WaitWithDefaultTimeout() + Expect(session4.ExitCode()).To(Equal(0)) + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) + }) + + It("podman stop all containers with one created", func() { + session := podmanTest.RunTopContainer("test1") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + session2 := podmanTest.Podman([]string{"create", ALPINE, "/bin/sh"}) + session2.WaitWithDefaultTimeout() + Expect(session2.ExitCode()).To(Equal(0)) + session3 := podmanTest.Podman([]string{"stop", "-a", "-t", "1"}) + session3.WaitWithDefaultTimeout() + Expect(session3.ExitCode()).To(Equal(0)) + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) + finalCtrs.WaitWithDefaultTimeout() + Expect(finalCtrs.ExitCode()).To(Equal(0)) + Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) + }) + }) |