-rw-r--r-- | .github/workflows/stale.yml             |   1
-rw-r--r-- | CONTRIBUTING.md                         |  12
-rw-r--r-- | RELEASE_NOTES.md                        |  11
-rw-r--r-- | pkg/api/handlers/libpod/images_pull.go  |   9
-rw-r--r-- | pkg/api/handlers/types.go               |   2
-rw-r--r-- | pkg/bindings/images/pull.go             |   1
-rw-r--r-- | pkg/bindings/test/images_test.go        |   8
-rw-r--r-- | pkg/domain/entities/images.go           |   2
-rw-r--r-- | pkg/domain/infra/abi/images.go          |   9
-rw-r--r-- | pkg/domain/infra/tunnel/containers.go   |   9
-rw-r--r-- | test/apiv2/rest_api/test_rest_v2_0_0.py | 246
-rw-r--r-- | test/e2e/attach_test.go                 |  10
-rw-r--r-- | test/e2e/libpod_suite_remote_test.go    |   4
-rw-r--r-- | test/e2e/libpod_suite_test.go           |   4
-rw-r--r-- | test/e2e/libpod_suite_varlink_test.go   |   4
-rw-r--r-- | test/e2e/mount_test.go                  |  23

16 files changed, 346 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 5e5a75713..8fd51b5e9 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -22,3 +22,4 @@ jobs:
           stale-pr-label: 'stale-pr'
           days-before-stale: 30
           days-before-close: 365
+          remove-stale-when-updated: true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dc48b389e..ba321921c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -7,6 +7,7 @@ that we follow.
 ## Topics
 
 * [Reporting Issues](#reporting-issues)
+* [Working On Issues](#working-on-issues)
 * [Contributing to Podman](#contributing-to-podman)
 * [Continuous Integration](#continuous-integration) [![Build Status](https://api.cirrus-ci.com/github/containers/podman.svg)](https://cirrus-ci.com/github/containers/podman/master)
 * [Submitting Pull Requests](#submitting-pull-requests)
@@ -28,6 +29,17 @@ The easier it is for us to reproduce it, the faster it'll be fixed!
 
 Please don't include any private/sensitive information in your issue!
 
+## Working On Issues
+
+Once you have decided to contribute to Podman by working on an issue, check our
+backlog of [open issues](https://github.com/containers/podman/issues) for any
+that do not have an "In Progress" label attached. Issues are often assigned to
+someone, to be worked on at a later time. If you have the time to work on the
+issue now, add yourself as an assignee and set the "In Progress" label if you
+are a member of the "Containers" GitHub organization. If you cannot set the
+label, just add a quick comment to the issue asking that the "In Progress"
+label be set and a member will do so for you.
+
 ## Contributing to Podman
 
 This section describes how to start a contribution to Podman.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index f3ac50a59..cabfafabb 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -15,6 +15,7 @@
 - The `podman play kube` command now supports the Socket HostPath type ([#7112](https://github.com/containers/podman/issues/7112)).
 - The `podman play kube` command now supports read-only mounts.
 - The `podman play kube` command now supports setting labels on pods from Kubernetes metadata labels.
+- The `podman play kube` command now supports setting container restart policy ([#7656](https://github.com/containers/podman/issues/7656)).
 - The `podman play kube` command now properly handles `HostAlias` entries.
 - The `podman generate kube` command now adds entries to `/etc/hosts` from `--host-add` generated YAML as `HostAlias` entries.
 - The `podman play kube` and `podman generate kube` commands now properly support `shareProcessNamespace` to share the PID namespace in pods.
@@ -29,6 +30,9 @@
 - A new global option has been added to Podman, `--runtime-flags`, which allows for setting flags to use when the OCI runtime is called.
 - The `podman manifest add` command now supports the `--cert-dir`, `--auth-file`, `--creds`, and `--tls-verify` options.
 
+### Security
+- This release resolves CVE-2020-14370, in which environment variables could be leaked between containers created using the Varlink API.
+
 ### Changes
 - Podman will now retry pulling an image 3 times if a pull fails due to network errors.
 - The `podman exec` command would previously print error messages (e.g. `exec session exited with non-zero exit code -1`) when the command run exited with a non-0 exit code. It no longer does this. The `podman exec` command will still exit with the same exit code as the command run in the container did.
@@ -72,8 +76,12 @@
 - Fixed a bug where the `--infra-command` parameter to `podman pod create` was nonfunctional.
 - Fixed a bug where `podman auto-update` would fail for any container started with `--pull=always` ([#7407](https://github.com/containers/podman/issues/7407)).
 - Fixed a bug where the `podman wait` command would only accept a single argument.
+- Fixed a bug where the parsing of the `--volumes-from` option to `podman run` and `podman create` was broken, making it impossible to use multiple mount options at the same time ([#7701](https://github.com/containers/podman/issues/7701)).
+- Fixed a bug where the `podman exec` command would not join executed processes to the container's supplemental groups if the container was started with both the `--user` and `--group-add` options.
+- Fixed a bug where the `--iidfile` option to `podman-remote build` was nonfunctional.
 
 ### API
+- The Libpod API version has been bumped to v2.0.0 due to a breaking change in the Image List API.
 - Docker-compatible Volume Endpoints (Create, Inspect, List, Remove, Prune) are now available!
 - Added an endpoint for generating systemd unit files for containers.
 - The `last` parameter to the Libpod container list endpoint now has an alias, `limit` ([#6413](https://github.com/containers/podman/issues/6413)).
@@ -96,6 +104,9 @@
 - All non-hijacking responses to API requests should not include headers with the version of the server.
 - Fixed a bug where Libpod and Compat Events endpoints did not send response headers until the first event occurred ([#7263](https://github.com/containers/podman/issues/7263)).
 - Fixed a bug where the Build endpoints (Compat and Libpod) did not stream progress to the client.
+- Fixed a bug where the Stats endpoints (Compat and Libpod) did not properly handle clients disconnecting.
+- Fixed a bug where the Ignore parameter to the Libpod Stop endpoint was not performing properly.
+- Fixed a bug where the Compat Logs endpoint for containers did not stream its output in the correct format ([#7196](https://github.com/containers/podman/issues/7196)).
 
 ### Misc
 - Updated Buildah to v1.16.1
diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go
index 8a2f4f4cf..ad8d1f38e 100644
--- a/pkg/api/handlers/libpod/images_pull.go
+++ b/pkg/api/handlers/libpod/images_pull.go
@@ -178,10 +178,19 @@ loop: // break out of for/select infinite loop
             flush()
         case <-runCtx.Done():
             if !failed {
+                // Send all image IDs pulled in the 'images' stanza
                 report.Images = images
                 if err := enc.Encode(report); err != nil {
                     logrus.Warnf("Failed to json encode error %q", err.Error())
                 }
+
+                report.Images = nil
+                // Pull the last ID from the list and publish it in the 'id' stanza. This maintains the previous API contract.
+                report.ID = images[len(images)-1]
+                if err := enc.Encode(report); err != nil {
+                    logrus.Warnf("Failed to json encode error %q", err.Error())
+                }
+
                 flush()
             }
             break loop // break out of for/select infinite loop
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index 0ccaa95bb..9e503dbb0 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -33,7 +33,7 @@ type LibpodImagesLoadReport struct {
 }
 
 type LibpodImagesPullReport struct {
-    ID string `json:"id"`
+    entities.ImagePullReport
 }
 
 // LibpodImagesRemoveReport is the return type for image removal via the rest
diff --git a/pkg/bindings/images/pull.go b/pkg/bindings/images/pull.go
index 261a481a2..2bfbbb2ac 100644
--- a/pkg/bindings/images/pull.go
+++ b/pkg/bindings/images/pull.go
@@ -89,6 +89,7 @@ func Pull(ctx context.Context, rawImage string, options entities.ImagePullOption
             mErr = multierror.Append(mErr, errors.New(report.Error))
         case len(report.Images) > 0:
             images = report.Images
+        case report.ID != "":
         default:
             return images, errors.New("failed to parse pull results stream, unexpected input")
         }
diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go
index e0dd28d7a..681855293 100644
--- a/pkg/bindings/test/images_test.go
+++ b/pkg/bindings/test/images_test.go
@@ -360,19 +360,19 @@ var _ = Describe("Podman images", func() {
         rawImage := "docker.io/library/busybox:latest"
 
         pulledImages, err := images.Pull(bt.conn, rawImage, entities.ImagePullOptions{})
-        Expect(err).To(BeNil())
+        Expect(err).NotTo(HaveOccurred())
         Expect(len(pulledImages)).To(Equal(1))
 
         exists, err := images.Exists(bt.conn, rawImage)
-        Expect(err).To(BeNil())
+        Expect(err).NotTo(HaveOccurred())
         Expect(exists).To(BeTrue())
 
         // Make sure the normalization AND the full-transport reference works.
         _, err = images.Pull(bt.conn, "docker://"+rawImage, entities.ImagePullOptions{})
-        Expect(err).To(BeNil())
+        Expect(err).NotTo(HaveOccurred())
 
         // The v2 endpoint only supports the docker transport. Let's see if that's really true.
         _, err = images.Pull(bt.conn, "bogus-transport:bogus.com/image:reference", entities.ImagePullOptions{})
-        Expect(err).To(Not(BeNil()))
+        Expect(err).To(HaveOccurred())
     })
 })
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index d0b738934..cad6693fa 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -156,6 +156,8 @@ type ImagePullReport struct {
     Error string `json:"error,omitempty"`
     // Images contains the ID's of the images pulled
     Images []string `json:"images,omitempty"`
+    // ID contains the image ID (retained for backwards compatibility)
+    ID string `json:"id,omitempty"`
 }
 
 // ImagePushOptions are the arguments for pushing images.
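
Taken together, the changes above mean that a Libpod API image pull now streams JSON objects carrying progress in "stream" stanzas, all pulled image IDs in an "images" stanza, and finally the last ID repeated in a legacy "id" stanza so existing clients keep working. The snippet below is an illustrative sketch, not part of this commit; it assumes a podman service listening on tcp:localhost:8080, as started by the new test/apiv2/rest_api/test_rest_v2_0_0.py later in this diff, and that the "alpine" reference resolves.

    # Illustrative sketch only; assumes a podman service on tcp:localhost:8080
    # (see the new REST API test file in this diff).
    import json
    import requests

    resp = requests.post(
        "http://localhost:8080/v1.0.0/libpod/images/pull?reference=alpine",
        timeout=60)
    resp.raise_for_status()

    images, legacy_id = [], None
    for line in resp.text.splitlines():
        report = json.loads(line)
        if "error" in report:       # pull failed
            raise RuntimeError(report["error"])
        if "images" in report:      # new stanza: all pulled image IDs
            images = report["images"]
        if "id" in report:          # legacy stanza: last image ID only
            legacy_id = report["id"]
        # "stream" stanzas carry human-readable progress and are ignored here

    print(images, legacy_id)
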
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index cc62c3f27..25c0c184f 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -191,6 +191,15 @@ func (ir *ImageEngine) Unmount(ctx context.Context, nameOrIDs []string, options
     reports := []*entities.ImageUnmountReport{}
     for _, img := range images {
         report := entities.ImageUnmountReport{Id: img.ID()}
+        mounted, _, err := img.Mounted()
+        if err != nil {
+            // Errors will be caught in Unmount call below
+            // Default assumption to mounted
+            mounted = true
+        }
+        if !mounted {
+            continue
+        }
         if err := img.Unmount(options.Force); err != nil {
             if options.All && errors.Cause(err) == storage.ErrLayerNotMounted {
                 logrus.Debugf("Error umounting image %s, storage.ErrLayerNotMounted", img.ID())
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 3e99b73b6..d0f90d900 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -389,6 +389,15 @@ func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIDs []string,
 }
 
 func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string, options entities.AttachOptions) error {
+    ctrs, err := getContainersByContext(ic.ClientCxt, false, false, []string{nameOrID})
+    if err != nil {
+        return err
+    }
+    ctr := ctrs[0]
+    if ctr.State != define.ContainerStateRunning.String() {
+        return errors.Errorf("you can only attach to running containers")
+    }
+
     return containers.Attach(ic.ClientCxt, nameOrID, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil)
 }
diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py
new file mode 100644
index 000000000..3376f8402
--- /dev/null
+++ b/test/apiv2/rest_api/test_rest_v2_0_0.py
@@ -0,0 +1,246 @@
+import json
+import os
+import subprocess
+import sys
+import time
+import unittest
+from multiprocessing import Process
+
+import requests
+from dateutil.parser import parse
+
+PODMAN_URL = "http://localhost:8080"
+
+
+def _url(path):
+    return PODMAN_URL + "/v1.0.0/libpod" + path
+
+
+def podman():
+    binary = os.getenv("PODMAN_BINARY")
+    if binary is None:
+        binary = "bin/podman"
+    return binary
+
+
+def ctnr(path):
+    r = requests.get(_url("/containers/json?all=true"))
+    try:
+        ctnrs = json.loads(r.text)
+    except Exception as e:
+        sys.stderr.write("Bad container response: {}/{}".format(r.text, e))
+        raise e
+    return path.format(ctnrs[0]["Id"])
+
+
+def validateObjectFields(buffer):
+    objs = json.loads(buffer)
+    if not isinstance(objs, dict):
+        for o in objs:
+            _ = o["Id"]
+    else:
+        _ = objs["Id"]
+    return objs
+
+
+class TestApi(unittest.TestCase):
+    podman = None
+
+    def setUp(self):
+        super().setUp()
+        if TestApi.podman.poll() is not None:
+            sys.stderr.write(f"podman service returned {TestApi.podman.returncode}\n")
+            sys.exit(2)
+        requests.get(
+            _url("/images/create?fromSrc=docker.io%2Falpine%3Alatest"))
+        # calling out to podman is easier than the API for running a container
+        subprocess.run([podman(), "run", "alpine", "/bin/ls"],
+                       check=True,
+                       stdout=subprocess.DEVNULL,
+                       stderr=subprocess.DEVNULL)
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+
+        TestApi.podman = subprocess.Popen(
+            [
+                podman(), "system", "service", "tcp:localhost:8080",
+                "--log-level=debug", "--time=0"
+            ],
+            shell=False,
+            stdin=subprocess.DEVNULL,
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        )
+        time.sleep(2)
+
+    @classmethod
+    def tearDownClass(cls):
+        TestApi.podman.terminate()
+        stdout, stderr = TestApi.podman.communicate(timeout=0.5)
+        if stdout:
+            print("\nService Stdout:\n" + stdout.decode('utf-8'))
+        if stderr:
+            print("\nService Stderr:\n" + stderr.decode('utf-8'))
+
+        if TestApi.podman.returncode > 0:
+            sys.stderr.write(f"podman exited with error code {TestApi.podman.returncode}\n")
+            sys.exit(2)
+
+        return super().tearDownClass()
+
+    def test_info(self):
+        r = requests.get(_url("/info"))
+        self.assertEqual(r.status_code, 200)
+        self.assertIsNotNone(r.content)
+        _ = json.loads(r.text)
+
+    def test_events(self):
+        r = requests.get(_url("/events?stream=false"))
+        self.assertEqual(r.status_code, 200, r.text)
+        self.assertIsNotNone(r.content)
+        for line in r.text.splitlines():
+            obj = json.loads(line)
+            # Actor.ID is uppercase for compatibility
+            _ = obj["Actor"]["ID"]
+
+    def test_containers(self):
+        r = requests.get(_url("/containers/json"), timeout=5)
+        self.assertEqual(r.status_code, 200, r.text)
+        obj = json.loads(r.text)
+        self.assertEqual(len(obj), 0)
+
+    def test_containers_all(self):
+        r = requests.get(_url("/containers/json?all=true"))
+        self.assertEqual(r.status_code, 200, r.text)
+        validateObjectFields(r.text)
+
+    def test_inspect_container(self):
+        r = requests.get(_url(ctnr("/containers/{}/json")))
+        self.assertEqual(r.status_code, 200, r.text)
+        obj = validateObjectFields(r.content)
+        _ = parse(obj["Created"])
+
+    def test_stats(self):
+        r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
+        self.assertIn(r.status_code, (200, 409), r.text)
+        if r.status_code == 200:
+            validateObjectFields(r.text)
+
+    def test_delete_containers(self):
+        r = requests.delete(_url(ctnr("/containers/{}")))
+        self.assertEqual(r.status_code, 204, r.text)
+
+    def test_stop_containers(self):
+        r = requests.post(_url(ctnr("/containers/{}/start")))
+        self.assertIn(r.status_code, (204, 304), r.text)
+
+        r = requests.post(_url(ctnr("/containers/{}/stop")))
+        self.assertIn(r.status_code, (204, 304), r.text)
+
+    def test_start_containers(self):
+        r = requests.post(_url(ctnr("/containers/{}/stop")))
+        self.assertIn(r.status_code, (204, 304), r.text)
+
+        r = requests.post(_url(ctnr("/containers/{}/start")))
+        self.assertIn(r.status_code, (204, 304), r.text)
+
+    def test_restart_containers(self):
+        r = requests.post(_url(ctnr("/containers/{}/start")))
+        self.assertIn(r.status_code, (204, 304), r.text)
+
+        r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
+        self.assertEqual(r.status_code, 204, r.text)
+
+    def test_resize(self):
+        r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
+        self.assertIn(r.status_code, (200, 409), r.text)
+        if r.status_code == 200:
+            self.assertIsNone(r.text)
+
+    def test_attach_containers(self):
+        r = requests.post(_url(ctnr("/containers/{}/attach")), timeout=5)
+        self.assertIn(r.status_code, (101, 500), r.text)
+
+    def test_logs_containers(self):
+        r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
+        self.assertEqual(r.status_code, 200, r.text)
+
+    def test_post_create(self):
+        self.skipTest("TODO: create request body")
+        r = requests.post(_url("/containers/create?args=True"))
+        self.assertEqual(r.status_code, 200, r.text)
+        json.loads(r.text)
+
+    def test_commit(self):
+        r = requests.post(_url(ctnr("/commit?container={}")))
+        self.assertEqual(r.status_code, 200, r.text)
+        validateObjectFields(r.text)
+
+    def test_images(self):
+        r = requests.get(_url("/images/json"))
+        self.assertEqual(r.status_code, 200, r.text)
+        validateObjectFields(r.content)
+
+    def test_inspect_image(self):
+        r = requests.get(_url("/images/alpine/json"))
+        self.assertEqual(r.status_code, 200, r.text)
+        obj = validateObjectFields(r.content)
+        _ = parse(obj["Created"])
+
+    def test_delete_image(self):
+        r = requests.delete(_url("/images/alpine?force=true"))
+        self.assertEqual(r.status_code, 200, r.text)
+        json.loads(r.text)
+
+    def test_pull(self):
+        r = requests.post(_url("/images/pull?reference=alpine"), timeout=15)
+        self.assertEqual(r.status_code, 200, r.status_code)
+        text = r.text
+        keys = {
+            "error": False,
+            "id": False,
+            "images": False,
+            "stream": False,
+        }
+        # Read and record stanza's from pull
+        for line in str.splitlines(text):
+            obj = json.loads(line)
+            key_list = list(obj.keys())
+            for k in key_list:
+                keys[k] = True
+
+        self.assertFalse(keys["error"], "Expected no errors")
+        self.assertTrue(keys["id"], "Expected to find id stanza")
+        self.assertTrue(keys["images"], "Expected to find images stanza")
+        self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")
+
+    def test_search(self):
+        # Had issues with this test hanging when repositories not happy
+        def do_search():
+            r = requests.get(_url("/images/search?term=alpine"), timeout=5)
+            self.assertEqual(r.status_code, 200, r.text)
+            json.loads(r.text)
+
+        search = Process(target=do_search)
+        search.start()
+        search.join(timeout=10)
+        self.assertFalse(search.is_alive(), "/images/search took too long")
+
+    def test_ping(self):
+        r = requests.get(PODMAN_URL + "/_ping")
+        self.assertEqual(r.status_code, 200, r.text)
+
+        r = requests.head(PODMAN_URL + "/_ping")
+        self.assertEqual(r.status_code, 200, r.text)
+
+        r = requests.get(_url("/_ping"))
+        self.assertEqual(r.status_code, 200, r.text)
+
+        r = requests.get(_url("/_ping"))
+        self.assertEqual(r.status_code, 200, r.text)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go
index 7b18f71ac..8065f6298 100644
--- a/test/e2e/attach_test.go
+++ b/test/e2e/attach_test.go
@@ -40,7 +40,6 @@ var _ = Describe("Podman attach", func() {
     })
 
     It("podman attach to non-running container", func() {
-        SkipIfRemote()
         session := podmanTest.Podman([]string{"create", "--name", "test1", "-d", "-i", ALPINE, "ls"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
@@ -51,8 +50,8 @@ var _ = Describe("Podman attach", func() {
     })
 
     It("podman container attach to non-running container", func() {
-        SkipIfRemote()
         session := podmanTest.Podman([]string{"container", "create", "--name", "test1", "-d", "-i", ALPINE, "ls"})
+
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
 
@@ -87,7 +86,6 @@ var _ = Describe("Podman attach", func() {
         Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
     })
     It("podman attach to the latest container", func() {
-        SkipIfRemote()
         session := podmanTest.Podman([]string{"run", "-d", "--name", "test1", ALPINE, "/bin/sh", "-c", "while true; do echo test1; sleep 1; done"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
@@ -96,7 +94,11 @@ var _ = Describe("Podman attach", func() {
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
 
-        results := podmanTest.Podman([]string{"attach", "-l"})
+        cid := "-l"
+        if IsRemote() {
+            cid = "test2"
+        }
+        results := podmanTest.Podman([]string{"attach", cid})
         time.Sleep(2 * time.Second)
         results.Signal(syscall.SIGTSTP)
         Expect(results.OutputToString()).To(ContainSubstring("test2"))
diff --git a/test/e2e/libpod_suite_remote_test.go b/test/e2e/libpod_suite_remote_test.go
index 874789b5e..e74d9bf7c 100644
--- a/test/e2e/libpod_suite_remote_test.go
+++ b/test/e2e/libpod_suite_remote_test.go
@@ -19,6 +19,10 @@ import (
     "github.com/onsi/ginkgo"
 )
 
+func IsRemote() bool {
+    return true
+}
+
 func SkipIfRemote() {
     ginkgo.Skip("This function is not enabled for remote podman")
 }
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index bfd898108..0f33798b7 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -12,6 +12,10 @@ import (
     . "github.com/onsi/ginkgo"
 )
 
+func IsRemote() bool {
+    return false
+}
+
 func SkipIfRemote() {
 }
diff --git a/test/e2e/libpod_suite_varlink_test.go b/test/e2e/libpod_suite_varlink_test.go
index 750c8cd09..0d7032429 100644
--- a/test/e2e/libpod_suite_varlink_test.go
+++ b/test/e2e/libpod_suite_varlink_test.go
@@ -19,6 +19,10 @@ import (
     "github.com/onsi/ginkgo"
 )
 
+func IsRemote() bool {
+    return true
+}
+
 func SkipIfRemote() {
     ginkgo.Skip("This function is not enabled for remote podman")
 }
diff --git a/test/e2e/mount_test.go b/test/e2e/mount_test.go
index a2b448337..1fbb92b09 100644
--- a/test/e2e/mount_test.go
+++ b/test/e2e/mount_test.go
@@ -348,6 +348,25 @@ var _ = Describe("Podman mount", func() {
         Expect(umount.ExitCode()).To(Equal(0))
     })
 
+    It("podman umount --all", func() {
+        setup := podmanTest.PodmanNoCache([]string{"pull", fedoraMinimal})
+        setup.WaitWithDefaultTimeout()
+        Expect(setup.ExitCode()).To(Equal(0))
+
+        setup = podmanTest.PodmanNoCache([]string{"pull", ALPINE})
+        setup.WaitWithDefaultTimeout()
+        Expect(setup.ExitCode()).To(Equal(0))
+
+        mount := podmanTest.Podman([]string{"image", "mount", fedoraMinimal})
+        mount.WaitWithDefaultTimeout()
+        Expect(mount.ExitCode()).To(Equal(0))
+
+        umount := podmanTest.Podman([]string{"image", "umount", "--all"})
+        umount.WaitWithDefaultTimeout()
+        Expect(umount.ExitCode()).To(Equal(0))
+        Expect(len(umount.OutputToStringArray())).To(Equal(1))
+    })
+
     It("podman mount many", func() {
         setup := podmanTest.PodmanNoCache([]string{"pull", fedoraMinimal})
         setup.WaitWithDefaultTimeout()
@@ -402,6 +421,10 @@ var _ = Describe("Podman mount", func() {
         Expect(mount.ExitCode()).To(Equal(0))
         Expect(mount.OutputToString()).To(Equal(""))
 
+        umount = podmanTest.PodmanNoCache([]string{"image", "umount", fedoraMinimal, ALPINE})
+        umount.WaitWithDefaultTimeout()
+        Expect(umount.ExitCode()).To(Equal(0))
+
         mount1 = podmanTest.PodmanNoCache([]string{"image", "mount", "--all"})
         mount1.WaitWithDefaultTimeout()
         Expect(mount1.ExitCode()).To(Equal(0))