summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/apiv2/python/__init__.py0
-rw-r--r--test/apiv2/python/rest_api/__init__.py0
-rw-r--r--test/apiv2/python/rest_api/fixtures/__init__.py3
-rw-r--r--test/apiv2/python/rest_api/fixtures/api_testcase.py103
-rw-r--r--test/apiv2/python/rest_api/fixtures/podman.py (renamed from test/apiv2/rest_api/__init__.py)6
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_container.py192
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_image.py165
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_manifest.py14
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_network.py155
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_pod.py65
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_system.py88
-rw-r--r--test/apiv2/python/rest_api/test_v2_0_0_volume.py75
-rw-r--r--test/apiv2/python/rest_api/v1_test_rest_v1_0_0.py (renamed from test/apiv2/rest_api/v1_test_rest_v1_0_0.py)2
-rw-r--r--test/apiv2/rest_api/test_rest_v2_0_0.py744
-rw-r--r--test/e2e/create_staticip_test.go8
-rw-r--r--test/e2e/create_test.go11
-rw-r--r--test/e2e/info_test.go11
-rw-r--r--test/e2e/network_connect_disconnect_test.go4
-rw-r--r--test/e2e/prune_test.go47
-rw-r--r--test/e2e/run_networking_test.go6
-rw-r--r--test/e2e/run_selinux_test.go8
-rw-r--r--test/e2e/run_test.go50
-rw-r--r--test/e2e/toolbox_test.go7
-rw-r--r--test/system/030-run.bats14
-rw-r--r--test/system/035-logs.bats34
-rw-r--r--test/system/070-build.bats4
-rw-r--r--test/system/130-kill.bats3
-rw-r--r--test/system/500-networking.bats70
-rw-r--r--test/system/700-play.bats26
-rw-r--r--test/upgrade/test-upgrade.bats74
30 files changed, 1149 insertions, 840 deletions
diff --git a/test/apiv2/python/__init__.py b/test/apiv2/python/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/apiv2/python/__init__.py
diff --git a/test/apiv2/python/rest_api/__init__.py b/test/apiv2/python/rest_api/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/apiv2/python/rest_api/__init__.py
diff --git a/test/apiv2/python/rest_api/fixtures/__init__.py b/test/apiv2/python/rest_api/fixtures/__init__.py
new file mode 100644
index 000000000..5d763e454
--- /dev/null
+++ b/test/apiv2/python/rest_api/fixtures/__init__.py
@@ -0,0 +1,3 @@
+from .api_testcase import APITestCase
+
+__all__ = ["APITestCase"]
diff --git a/test/apiv2/python/rest_api/fixtures/api_testcase.py b/test/apiv2/python/rest_api/fixtures/api_testcase.py
new file mode 100644
index 000000000..8b771774b
--- /dev/null
+++ b/test/apiv2/python/rest_api/fixtures/api_testcase.py
@@ -0,0 +1,103 @@
+import json
+import subprocess
+import unittest
+
+import requests
+import sys
+import time
+
+from .podman import Podman
+
+
class APITestCase(unittest.TestCase):
    """Base fixture for libpod REST API tests.

    Starts one ``podman system service`` per test class, pre-pulls the alpine
    image, and creates/destroys a throwaway container around each test.
    """

    PODMAN_URL = "http://localhost:8080"
    podman = None  # initialized podman configuration for tests
    service = None  # podman service instance

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        APITestCase.podman = Podman()
        APITestCase.service = APITestCase.podman.open(
            "system", "service", "tcp:localhost:8080", "--time=0"
        )
        # give the service some time to be ready...
        time.sleep(2)

        returncode = APITestCase.service.poll()
        if returncode is not None:
            raise subprocess.CalledProcessError(returncode, "podman system service")

        r = requests.post(
            APITestCase.uri("/images/pull?reference=quay.io%2Flibpod%2Falpine%3Alatest")
        )
        if r.status_code != 200:
            raise subprocess.CalledProcessError(
                r.status_code, f"podman images pull quay.io/libpod/alpine:latest {r.text}"
            )

    @classmethod
    def tearDownClass(cls):
        APITestCase.service.terminate()
        stdout, stderr = APITestCase.service.communicate(timeout=0.5)
        if stdout:
            sys.stdout.write("\nService Stdout:\n" + stdout.decode("utf-8"))
        if stderr:
            sys.stderr.write("\nService Stderr:\n" + stderr.decode("utf-8"))
        return super().tearDownClass()

    def setUp(self):
        super().setUp()
        APITestCase.podman.run("run", "alpine", "/bin/ls", check=True)

    def tearDown(self) -> None:
        APITestCase.podman.run("pod", "rm", "--all", "--force", check=True)
        APITestCase.podman.run("rm", "--all", "--force", check=True)
        super().tearDown()

    @property
    def podman_url(self):
        """Base URL of the service for Docker-compat ("/v1.40/...") requests."""
        return "http://localhost:8080"

    @staticmethod
    def uri(path):
        """Return the full libpod API URL for the given path."""
        return APITestCase.PODMAN_URL + "/v2.0.0/libpod" + path

    def resolve_container(self, path):
        """Find 'first' container and return 'Id' formatted into given URI path."""
        r = None  # fix: keep 'r' bound even when the GET itself raises
        try:
            r = requests.get(self.uri("/containers/json?all=true"))
            containers = r.json()
        except Exception as e:
            msg = f"Bad container response: {e}"
            if r is not None:
                msg += ": " + r.text
            raise self.failureException(msg) from e
        return path.format(containers[0]["Id"])

    def assertContainerExists(self, member, msg=None):  # pylint: disable=invalid-name
        r = requests.get(self.uri(f"/containers/{member}/exists"))
        if r.status_code == 404:
            if msg is None:
                msg = f"Container '{member}' does not exist."
            # fix: self.failureException(msg) only constructed the exception;
            # self.fail() actually raises it so the assertion can fail
            self.fail(msg)

    def assertContainerNotExists(self, member, msg=None):  # pylint: disable=invalid-name
        r = requests.get(self.uri(f"/containers/{member}/exists"))
        if r.status_code == 204:
            if msg is None:
                msg = f"Container '{member}' exists."
            self.fail(msg)  # fix: raise instead of discarding the exception

    def assertId(self, content):  # pylint: disable=invalid-name
        """Assert that content is JSON carrying an 'Id' key (dict or list of dicts)."""
        objects = json.loads(content)
        try:
            if isinstance(objects, dict):
                _ = objects["Id"]
            else:
                for item in objects:
                    _ = item["Id"]
        except KeyError:
            self.fail("Failed to find 'Id' in return value.")
diff --git a/test/apiv2/rest_api/__init__.py b/test/apiv2/python/rest_api/fixtures/podman.py
index 0ad6b51b3..bae04f87d 100644
--- a/test/apiv2/rest_api/__init__.py
+++ b/test/apiv2/python/rest_api/fixtures/podman.py
@@ -7,7 +7,7 @@ import sys
import tempfile
-class Podman(object):
+class Podman:
"""
Instances hold the configuration and setup for running podman commands
"""
@@ -34,7 +34,7 @@ class Podman(object):
p = configparser.ConfigParser()
p.read_dict(
{
- "registries.search": {"registries": "['docker.io']"},
+ "registries.search": {"registries": "['quay.io']"},
"registries.insecure": {"registries": "[]"},
"registries.block": {"registries": "[]"},
}
@@ -102,7 +102,7 @@ class Podman(object):
)
def run(self, command, *args, **kwargs):
- """Podman initialized instance to run a given command
+ """Run given podman command
:param self: Podman instance
:param command: podman sub-command to run
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_container.py b/test/apiv2/python/rest_api/test_v2_0_0_container.py
new file mode 100644
index 000000000..70c07d47f
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_container.py
@@ -0,0 +1,192 @@
+import random
+import unittest
+
+import requests
+from dateutil.parser import parse
+
+from .fixtures import APITestCase
+
+
class ContainerTestCase(APITestCase):
    """Exercise libpod and Docker-compat container endpoints."""

    def test_list(self):
        r = requests.get(self.uri("/containers/json"), timeout=5)
        self.assertEqual(r.status_code, 200, r.text)
        obj = r.json()
        self.assertEqual(len(obj), 0)

    def test_list_all(self):
        r = requests.get(self.uri("/containers/json?all=true"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)

    def test_inspect(self):
        r = requests.get(self.uri(self.resolve_container("/containers/{}/json")))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)
        # "Created" must be a parseable timestamp
        _ = parse(r.json()["Created"])

    def test_stats(self):
        r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
        # 409 is acceptable: stats are unsupported for stopped containers
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.assertId(r.content)

    def test_delete(self):
        r = requests.delete(self.uri(self.resolve_container("/containers/{}")))
        self.assertEqual(r.status_code, 204, r.text)

    def test_stop(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_start(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_restart(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(self.uri(self.resolve_container("/containers/{}/restart")), timeout=5)
        self.assertEqual(r.status_code, 204, r.text)

    def test_resize(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/resize?h=43&w=80")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.assertEqual(r.text, "", r.text)

    def test_attach(self):
        self.skipTest("FIXME: Test timeouts")
        r = requests.post(self.uri(self.resolve_container("/containers/{}/attach")), timeout=5)
        self.assertIn(r.status_code, (101, 500), r.text)

    def test_logs(self):
        r = requests.get(self.uri(self.resolve_container("/containers/{}/logs?stdout=true")))
        self.assertEqual(r.status_code, 200, r.text)

    def test_commit(self):
        r = requests.post(self.uri(self.resolve_container("/commit?container={}")))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)

        obj = r.json()
        self.assertIsInstance(obj, dict)

    def test_prune(self):
        name = f"Container_{random.getrandbits(160):x}"

        r = requests.post(
            self.podman_url + f"/v1.40/containers/create?name={name}",
            json={
                "Cmd": ["cp", "/etc/motd", "/motd.size_test"],
                "Image": "alpine:latest",
                "NetworkDisabled": True,
            },
        )
        self.assertEqual(r.status_code, 201, r.text)
        create = r.json()

        r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/start")
        self.assertEqual(r.status_code, 204, r.text)

        r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/wait")
        self.assertEqual(r.status_code, 200, r.text)
        wait = r.json()
        self.assertEqual(wait["StatusCode"], 0, wait["Error"]["Message"])

        prune = requests.post(self.podman_url + "/v1.40/containers/prune")
        self.assertEqual(prune.status_code, 200, prune.status_code)
        prune_payload = prune.json()
        self.assertGreater(prune_payload["SpaceReclaimed"], 0)
        self.assertIn(create["Id"], prune_payload["ContainersDeleted"])

        # Delete any orphaned containers
        r = requests.get(self.podman_url + "/v1.40/containers/json?all=true")
        self.assertEqual(r.status_code, 200, r.text)
        # fix: loop previously bound its variable to ``self.resolve_container``,
        # clobbering the inherited helper method on this instance
        for ctnr in r.json():
            requests.delete(
                self.podman_url + f"/v1.40/containers/{ctnr['Id']}?force=true"
            )

        # Image prune here tied to containers freeing up
        prune = requests.post(self.podman_url + "/v1.40/images/prune")
        self.assertEqual(prune.status_code, 200, prune.text)
        prune_payload = prune.json()
        self.assertGreater(prune_payload["SpaceReclaimed"], 0)

        # FIXME need method to determine which image is going to be "pruned" to fix test
        # TODO should handler be recursive when deleting images?
        # self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])

        # FIXME (@vrothberg): I commented this line out during the `libimage` migration.
        # It doesn't make sense to report anything to be deleted if the reclaimed space
        # is zero. I think the test needs some rewrite.
        # self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])

    def test_status(self):
        r = requests.post(
            self.podman_url + "/v1.40/containers/create?name=topcontainer",
            json={"Cmd": ["top"], "Image": "alpine:latest"},
        )
        self.assertEqual(r.status_code, 201, r.text)
        payload = r.json()
        container_id = payload["Id"]
        self.assertIsNotNone(container_id)

        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertEqual(payload[0]["Status"], "Created")

        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
        self.assertEqual(r.status_code, 204, r.text)

        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Up"))

        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/pause")
        self.assertEqual(r.status_code, 204, r.text)

        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
        self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))

        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/unpause")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/stop")
        self.assertEqual(r.status_code, 204, r.text)

        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))

        r = requests.delete(self.podman_url + f"/v1.40/containers/{container_id}")
        self.assertEqual(r.status_code, 204, r.text)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_image.py b/test/apiv2/python/rest_api/test_v2_0_0_image.py
new file mode 100644
index 000000000..99f513608
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_image.py
@@ -0,0 +1,165 @@
+import json
+import unittest
+from multiprocessing import Process
+
+import requests
+from dateutil.parser import parse
+from .fixtures import APITestCase
+
+
class ImageTestCase(APITestCase):
    """Exercise libpod and Docker-compat image endpoints."""

    def test_list(self):
        r = requests.get(self.podman_url + "/v1.40/images/json")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageList
        required_keys = (
            "Id",
            "ParentId",
            "RepoTags",
            "RepoDigests",
            "Created",
            "Size",
            "SharedSize",
            "VirtualSize",
            "Labels",
            "Containers",
        )
        images = r.json()
        self.assertIsInstance(images, list)
        for item in images:
            self.assertIsInstance(item, dict)
            for k in required_keys:
                self.assertIn(k, item)

    def test_inspect(self):
        r = requests.get(self.podman_url + "/v1.40/images/alpine/json")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageInspect
        required_keys = (
            "Id",
            "Parent",
            "Comment",
            "Created",
            "Container",
            "DockerVersion",
            "Author",
            "Architecture",
            "Os",
            "Size",
            "VirtualSize",
            "GraphDriver",
            "RootFS",
            "Metadata",
        )

        image = r.json()
        self.assertIsInstance(image, dict)
        for item in required_keys:
            self.assertIn(item, image)
        # "Created" must be a parseable timestamp
        _ = parse(image["Created"])

    def test_delete(self):
        r = requests.delete(self.podman_url + "/v1.40/images/alpine?force=true")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsInstance(r.json(), list)

    def test_pull(self):
        r = requests.post(self.uri("/images/pull?reference=alpine"), timeout=15)
        self.assertEqual(r.status_code, 200, r.status_code)
        text = r.text
        keys = {
            "error": False,
            "id": False,
            "images": False,
            "stream": False,
        }
        # Read and record stanza's from pull
        for line in str.splitlines(text):
            obj = json.loads(line)
            key_list = list(obj.keys())
            for k in key_list:
                keys[k] = True

        self.assertFalse(keys["error"], "Expected no errors")
        self.assertTrue(keys["id"], "Expected to find id stanza")
        self.assertTrue(keys["images"], "Expected to find images stanza")
        self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")

    def test_search_compat(self):
        url = self.podman_url + "/v1.40/images/search"

        # Had issues with this test hanging when repositories not happy
        def do_search1():
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#1: {r.text}")
            self.assertIsInstance(r.json(), list)

        def do_search2():
            payload = {"term": "alpine", "limit": 1}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#2: {r.text}")

            results = r.json()
            self.assertIsInstance(results, list)
            self.assertEqual(len(results), 1)

        def do_search3():
            # FIXME: Research if quay.io supports is-official and which image is "official"
            return
            payload = {"term": "thanos", "filters": '{"is-official":["true"]}'}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#3: {r.text}")

            results = r.json()
            self.assertIsInstance(results, list)

            # There should be only one official image
            self.assertEqual(len(results), 1)

        def do_search4():
            headers = {"X-Registry-Auth": "null"}
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, headers=headers, timeout=5)
            self.assertEqual(r.status_code, 200, f"#4: {r.text}")

        def do_search5():
            headers = {"X-Registry-Auth": "invalid value"}
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, headers=headers, timeout=5)
            self.assertEqual(r.status_code, 400, f"#5: {r.text}")

        # Run each search in a subprocess so a hung registry can't hang the suite.
        # fix: 'i' was initialized to 1 but never incremented, so every subTest
        # (and failure message) reported "#1"; enumerate keeps it in step.
        for i, fn in enumerate([do_search1, do_search2, do_search3, do_search4, do_search5], start=1):
            with self.subTest(i=i):
                search = Process(target=fn)
                search.start()
                search.join(timeout=10)
                self.assertFalse(search.is_alive(), f"#{i} /images/search took too long")

    def test_history(self):
        r = requests.get(self.podman_url + "/v1.40/images/alpine/history")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageHistory
        required_keys = ("Id", "Created", "CreatedBy", "Tags", "Size", "Comment")

        changes = r.json()
        self.assertIsInstance(changes, list)
        for change in changes:
            self.assertIsInstance(change, dict)
            for k in required_keys:
                self.assertIn(k, change)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_manifest.py b/test/apiv2/python/rest_api/test_v2_0_0_manifest.py
new file mode 100644
index 000000000..c28c63bcb
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_manifest.py
@@ -0,0 +1,14 @@
+import unittest
+
+import requests
+from .fixtures import APITestCase
+
+
class ManifestTestCase(APITestCase):
    """Exercise the libpod manifest endpoints."""

    def test_manifest_409(self):
        # Creating a manifest with an invalid image name must be rejected.
        # NOTE(review): the method name says 409 but the endpoint currently
        # answers 400 — confirm which status code is intended.
        params = {"name": "ThisIsAnInvalidImage"}
        response = requests.post(self.uri("/manifests/create"), params=params)
        self.assertEqual(response.status_code, 400, response.text)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_network.py b/test/apiv2/python/rest_api/test_v2_0_0_network.py
new file mode 100644
index 000000000..3888123fb
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_network.py
@@ -0,0 +1,155 @@
+import random
+import unittest
+
+import requests
+
+from .fixtures import APITestCase
+
+
class NetworkTestCase(APITestCase):
    """Exercise Docker-compat network endpoints."""

    # TODO Need to support Docker-py order of network/container creates
    def test_connect(self):
        """Create network and container then connect to network"""
        net_default = requests.post(
            self.podman_url + "/v1.40/networks/create", json={"Name": "TestDefaultNetwork"}
        )
        self.assertEqual(net_default.status_code, 201, net_default.text)

        create = requests.post(
            self.podman_url + "/v1.40/containers/create?name=postCreateConnect",
            json={
                "Cmd": ["top"],
                "Image": "alpine:latest",
                "NetworkDisabled": False,
                # FIXME adding these 2 lines cause: (This is sampled from docker-py)
                # "network already exists","message":"container
                # 01306e499df5441560d70071a54342611e422a94de20865add50a9565fd79fb9 is already connected to CNI
                # network \"TestDefaultNetwork\": network already exists"
                # "HostConfig": {"NetworkMode": "TestDefaultNetwork"},
                # "NetworkingConfig": {"EndpointsConfig": {"TestDefaultNetwork": None}},
                # FIXME These two lines cause:
                # CNI network \"TestNetwork\" not found","message":"error configuring network namespace for container
                # 369ddfa7d3211ebf1fbd5ddbff91bd33fa948858cea2985c133d6b6507546dff: CNI network \"TestNetwork\" not
                # found"
                # "HostConfig": {"NetworkMode": "TestNetwork"},
                # "NetworkingConfig": {"EndpointsConfig": {"TestNetwork": None}},
                # FIXME no networking defined cause: (note this error is from the container inspect below)
                # "internal libpod error","message":"network inspection mismatch: asked to join 2 CNI network(s) [
                # TestDefaultNetwork podman], but have information on 1 network(s): internal libpod error"
            },
        )
        self.assertEqual(create.status_code, 201, create.text)
        self.assertId(create.content)

        payload = create.json()
        start = requests.post(self.podman_url + f"/v1.40/containers/{payload['Id']}/start")
        self.assertEqual(start.status_code, 204, start.text)

        connect = requests.post(
            self.podman_url + "/v1.40/networks/TestDefaultNetwork/connect",
            json={"Container": payload["Id"]},
        )
        self.assertEqual(connect.status_code, 200, connect.text)
        self.assertEqual(connect.text, "OK\n")

        inspect = requests.get(f"{self.podman_url}/v1.40/containers/{payload['Id']}/json")
        self.assertEqual(inspect.status_code, 200, inspect.text)

        payload = inspect.json()
        self.assertFalse(payload["Config"].get("NetworkDisabled", False))

        self.assertEqual(
            "TestDefaultNetwork",
            payload["NetworkSettings"]["Networks"]["TestDefaultNetwork"]["NetworkID"],
        )
        # TODO restore this to test, when joining multiple networks possible
        # self.assertEqual(
        #     "TestNetwork",
        #     payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
        # )
        # TODO Need to support network aliases
        # self.assertIn(
        #     "test_post_create",
        #     payload["NetworkSettings"]["Networks"]["TestNetwork"]["Aliases"],
        # )

    def test_create(self):
        """Create network and connect container during create"""
        net = requests.post(
            self.podman_url + "/v1.40/networks/create", json={"Name": "TestNetwork"}
        )
        self.assertEqual(net.status_code, 201, net.text)

        create = requests.post(
            self.podman_url + "/v1.40/containers/create?name=postCreate",
            json={
                "Cmd": ["date"],
                "Image": "alpine:latest",
                "NetworkDisabled": False,
                "HostConfig": {"NetworkMode": "TestNetwork"},
            },
        )
        self.assertEqual(create.status_code, 201, create.text)
        self.assertId(create.content)

        payload = create.json()
        inspect = requests.get(f"{self.podman_url}/v1.40/containers/{payload['Id']}/json")
        self.assertEqual(inspect.status_code, 200, inspect.text)

        payload = inspect.json()
        self.assertFalse(payload["Config"].get("NetworkDisabled", False))
        self.assertEqual(
            "TestNetwork",
            payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
        )

    def test_crud(self):
        name = f"Network_{random.getrandbits(160):x}"

        # Cannot test for 0 existing networks because default "podman" network always exists

        create = requests.post(self.podman_url + "/v1.40/networks/create", json={"Name": name})
        self.assertEqual(create.status_code, 201, create.text)
        self.assertId(create.content)

        net = create.json()
        self.assertIsInstance(net, dict)
        self.assertNotEqual(net["Id"], name)
        ident = net["Id"]

        ls = requests.get(self.podman_url + "/v1.40/networks")
        self.assertEqual(ls.status_code, 200, ls.text)

        networks = ls.json()
        self.assertIsInstance(networks, list)

        found = False
        for net in networks:
            if net["Name"] == name:
                found = True
                break
        self.assertTrue(found, f"Network '{name}' not found")

        inspect = requests.get(self.podman_url + f"/v1.40/networks/{ident}")
        self.assertEqual(inspect.status_code, 200, inspect.text)
        self.assertIsInstance(inspect.json(), dict)

        inspect = requests.delete(self.podman_url + f"/v1.40/networks/{ident}")
        self.assertEqual(inspect.status_code, 204, inspect.text)
        inspect = requests.get(self.podman_url + f"/v1.40/networks/{ident}")
        self.assertEqual(inspect.status_code, 404, inspect.text)

        # network prune
        prune_name = f"Network_{random.getrandbits(160):x}"
        prune_create = requests.post(
            self.podman_url + "/v1.40/networks/create", json={"Name": prune_name}
        )
        # fix: previously asserted create.status_code (the earlier, already-checked
        # response) instead of the prune network's own create response
        self.assertEqual(prune_create.status_code, 201, prune_create.text)

        prune = requests.post(self.podman_url + "/v1.40/networks/prune")
        self.assertEqual(prune.status_code, 200, prune.text)
        self.assertIn(prune_name, prune.json()["NetworksDeleted"])


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_pod.py b/test/apiv2/python/rest_api/test_v2_0_0_pod.py
new file mode 100644
index 000000000..9155ad19c
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_pod.py
@@ -0,0 +1,65 @@
+import random
+import unittest
+
+import requests
+from .fixtures import APITestCase
+
+
class TestApi(APITestCase):
    def test_pod_start_conflict(self):
        """Verify issue #8865"""
        pod_names = [
            f"Pod_{random.getrandbits(160):x}",
            f"Pod_{random.getrandbits(160):x}",
        ]

        # Build two pods, each publishing the same host port (8889) and each
        # holding a single long-running container.
        for name in pod_names:
            response = requests.post(
                self.uri("/pods/create"),
                json={
                    "name": name,
                    "no_infra": False,
                    "portmappings": [
                        {"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}
                    ],
                },
            )
            self.assertEqual(response.status_code, 201, response.text)

            response = requests.post(
                self.uri("/containers/create"),
                json={
                    "pod": name,
                    "image": "quay.io/libpod/alpine:latest",
                    "command": ["top"],
                },
            )
            self.assertEqual(response.status_code, 201, response.text)

        # The first pod claims the host port...
        response = requests.post(self.uri(f"/pods/{pod_names[0]}/start"))
        self.assertEqual(response.status_code, 200, response.text)

        # ...so starting the second must report a conflict.
        response = requests.post(self.uri(f"/pods/{pod_names[1]}/start"))
        self.assertEqual(response.status_code, 409, response.text)

        start = response.json()
        self.assertGreater(len(start["Errs"]), 0, response.text)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_system.py b/test/apiv2/python/rest_api/test_v2_0_0_system.py
new file mode 100644
index 000000000..3628b5af1
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_system.py
@@ -0,0 +1,88 @@
+import json
+import unittest
+
+import requests
+from .fixtures import APITestCase
+
+
class SystemTestCase(APITestCase):
    """Exercise system-level endpoints: info, events, ping, version, df."""

    def test_info(self):
        r = requests.get(self.uri("/info"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsNotNone(r.content)
        _ = r.json()

        r = requests.get(self.podman_url + "/v1.40/info")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsNotNone(r.content)
        _ = r.json()

    def test_events(self):
        r = requests.get(self.uri("/events?stream=false"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsNotNone(r.content)

        report = r.text.splitlines()
        self.assertGreater(len(report), 0, "No events found!")
        for line in report:
            obj = json.loads(line)
            # Actor.ID is uppercase for compatibility
            self.assertIn("ID", obj["Actor"])

    def test_ping(self):
        # fix: "Pragma" was listed twice; duplicate removed
        required_headers = (
            "API-Version",
            "Builder-Version",
            "Docker-Experimental",
            "Cache-Control",
            "Pragma",
        )

        def check_headers(req):
            for k in required_headers:
                self.assertIn(k, req.headers)

        r = requests.get(self.podman_url + "/_ping")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertEqual(r.text, "OK")
        check_headers(r)

        r = requests.head(self.podman_url + "/_ping")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertEqual(r.text, "")
        check_headers(r)

        r = requests.get(self.uri("/_ping"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertEqual(r.text, "OK")
        check_headers(r)

        r = requests.head(self.uri("/_ping"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertEqual(r.text, "")
        check_headers(r)

    def test_version(self):
        r = requests.get(self.podman_url + "/v1.40/version")
        self.assertEqual(r.status_code, 200, r.text)

        r = requests.get(self.uri("/version"))
        self.assertEqual(r.status_code, 200, r.text)

    def test_df(self):
        r = requests.get(self.podman_url + "/v1.40/system/df")
        self.assertEqual(r.status_code, 200, r.text)

        obj = r.json()
        self.assertIn("Images", obj)
        self.assertIn("Containers", obj)
        self.assertIn("Volumes", obj)
        self.assertIn("BuildCache", obj)

        r = requests.get(self.uri("/system/df"))
        self.assertEqual(r.status_code, 200, r.text)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_volume.py b/test/apiv2/python/rest_api/test_v2_0_0_volume.py
new file mode 100644
index 000000000..f5231e17c
--- /dev/null
+++ b/test/apiv2/python/rest_api/test_v2_0_0_volume.py
@@ -0,0 +1,75 @@
+import os
+import random
+import unittest
+
+import requests
+from .fixtures import APITestCase
+
+
class VolumeTestCase(APITestCase):
    """Exercise Docker-compat volume endpoints: list, create, inspect, delete, prune."""

    def test_volume(self):
        name = f"Volume_{random.getrandbits(160):x}"

        ls = requests.get(self.podman_url + "/v1.40/volumes")
        self.assertEqual(ls.status_code, 200, ls.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/VolumeList
        required_keys = (
            "Volumes",
            "Warnings",
        )

        volumes = ls.json()
        self.assertIsInstance(volumes, dict)
        for key in required_keys:
            self.assertIn(key, volumes)

        create = requests.post(self.podman_url + "/v1.40/volumes/create", json={"Name": name})
        self.assertEqual(create.status_code, 201, create.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate
        # and https://docs.docker.com/engine/api/v1.40/#operation/VolumeInspect
        required_keys = (
            "Name",
            "Driver",
            "Mountpoint",
            "Labels",
            "Scope",
            "Options",
        )

        volume = create.json()
        self.assertIsInstance(volume, dict)
        for k in required_keys:
            self.assertIn(k, volume)
        self.assertEqual(volume["Name"], name)

        inspect = requests.get(self.podman_url + f"/v1.40/volumes/{name}")
        self.assertEqual(inspect.status_code, 200, inspect.text)

        volume = inspect.json()
        self.assertIsInstance(volume, dict)
        for k in required_keys:
            self.assertIn(k, volume)

        rm = requests.delete(self.podman_url + f"/v1.40/volumes/{name}")
        self.assertEqual(rm.status_code, 204, rm.text)

        # recreate volume with data and then prune it
        r = requests.post(self.podman_url + "/v1.40/volumes/create", json={"Name": name})
        # fix: previously asserted the stale 'create' response (already checked
        # above), so a failed re-create went unnoticed
        self.assertEqual(r.status_code, 201, r.text)

        recreate = r.json()
        with open(os.path.join(recreate["Mountpoint"], "test_prune"), "w") as file:
            file.writelines(["This is a test\n", "This is a good test\n"])

        prune = requests.post(self.podman_url + "/v1.40/volumes/prune")
        self.assertEqual(prune.status_code, 200, prune.text)

        payload = prune.json()
        self.assertIn(name, payload["VolumesDeleted"])
        self.assertGreater(payload["SpaceReclaimed"], 0)


if __name__ == "__main__":
    unittest.main()
diff --git a/test/apiv2/rest_api/v1_test_rest_v1_0_0.py b/test/apiv2/python/rest_api/v1_test_rest_v1_0_0.py
index 23528a246..905c29683 100644
--- a/test/apiv2/rest_api/v1_test_rest_v1_0_0.py
+++ b/test/apiv2/python/rest_api/v1_test_rest_v1_0_0.py
@@ -45,7 +45,7 @@ class TestApi(unittest.TestCase):
if TestApi.podman.poll() is not None:
sys.stderr.write("podman service returned {}", TestApi.podman.returncode)
sys.exit(2)
- requests.get(_url("/images/create?fromSrc=docker.io%2Falpine%3Alatest"))
+ requests.get(_url("/images/create?fromSrc=quay.io%2Flibpod%2Falpine%3Alatest"))
# calling out to podman is easier than the API for running a container
subprocess.run(
[podman(), "run", "alpine", "/bin/ls"],
diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py
deleted file mode 100644
index f66e2b120..000000000
--- a/test/apiv2/rest_api/test_rest_v2_0_0.py
+++ /dev/null
@@ -1,744 +0,0 @@
-import json
-import os
-import random
-import string
-import subprocess
-import sys
-import time
-import unittest
-from multiprocessing import Process
-
-import requests
-from dateutil.parser import parse
-
-from test.apiv2.rest_api import Podman
-
-PODMAN_URL = "http://localhost:8080"
-
-
-def _url(path):
- return PODMAN_URL + "/v2.0.0/libpod" + path
-
-
-def ctnr(path):
- try:
- r = requests.get(_url("/containers/json?all=true"))
- ctnrs = json.loads(r.text)
- except Exception as e:
- msg = f"Bad container response: {e}"
- if r is not None:
- msg = msg + " " + r.text
- sys.stderr.write(msg + "\n")
- raise
- return path.format(ctnrs[0]["Id"])
-
-
-def validateObjectFields(buffer):
- objs = json.loads(buffer)
- if not isinstance(objs, dict):
- for o in objs:
- _ = o["Id"]
- else:
- _ = objs["Id"]
- return objs
-
-
-class TestApi(unittest.TestCase):
- podman = None # initialized podman configuration for tests
- service = None # podman service instance
-
- def setUp(self):
- super().setUp()
-
- TestApi.podman.run("run", "alpine", "/bin/ls", check=True)
-
- def tearDown(self) -> None:
- super().tearDown()
-
- TestApi.podman.run("pod", "rm", "--all", "--force", check=True)
- TestApi.podman.run("rm", "--all", "--force", check=True)
-
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
-
- TestApi.podman = Podman()
- TestApi.service = TestApi.podman.open("system", "service", "tcp:localhost:8080", "--time=0")
- # give the service some time to be ready...
- time.sleep(2)
-
- returncode = TestApi.service.poll()
- if returncode is not None:
- raise subprocess.CalledProcessError(returncode, "podman system service")
-
- r = requests.post(_url("/images/pull?reference=docker.io%2Falpine%3Alatest"))
- if r.status_code != 200:
- raise subprocess.CalledProcessError(
- r.status_code, f"podman images pull docker.io/alpine:latest {r.text}"
- )
-
- @classmethod
- def tearDownClass(cls):
- TestApi.service.terminate()
- stdout, stderr = TestApi.service.communicate(timeout=0.5)
- if stdout:
- sys.stdout.write("\nService Stdout:\n" + stdout.decode("utf-8"))
- if stderr:
- sys.stderr.write("\nService Stderr:\n" + stderr.decode("utf-8"))
- return super().tearDownClass()
-
- def test_info(self):
- r = requests.get(_url("/info"))
- self.assertEqual(r.status_code, 200)
- self.assertIsNotNone(r.content)
- _ = json.loads(r.text)
-
- info = requests.get(PODMAN_URL + "/v1.40/info")
- self.assertEqual(info.status_code, 200, info.content)
- _ = json.loads(info.text)
-
- def test_events(self):
- r = requests.get(_url("/events?stream=false"))
- self.assertEqual(r.status_code, 200, r.text)
- self.assertIsNotNone(r.content)
-
- report = r.text.splitlines()
- self.assertGreater(len(report), 0, "No events found!")
- for line in report:
- obj = json.loads(line)
- # Actor.ID is uppercase for compatibility
- self.assertIn("ID", obj["Actor"])
-
- def test_containers(self):
- r = requests.get(_url("/containers/json"), timeout=5)
- self.assertEqual(r.status_code, 200, r.text)
- obj = json.loads(r.text)
- self.assertEqual(len(obj), 0)
-
- def test_containers_all(self):
- r = requests.get(_url("/containers/json?all=true"))
- self.assertEqual(r.status_code, 200, r.text)
- validateObjectFields(r.text)
-
- def test_inspect_container(self):
- r = requests.get(_url(ctnr("/containers/{}/json")))
- self.assertEqual(r.status_code, 200, r.text)
- obj = validateObjectFields(r.content)
- _ = parse(obj["Created"])
-
- def test_stats(self):
- r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
- self.assertIn(r.status_code, (200, 409), r.text)
- if r.status_code == 200:
- validateObjectFields(r.text)
-
- def test_delete_containers(self):
- r = requests.delete(_url(ctnr("/containers/{}")))
- self.assertEqual(r.status_code, 204, r.text)
-
- def test_stop_containers(self):
- r = requests.post(_url(ctnr("/containers/{}/start")))
- self.assertIn(r.status_code, (204, 304), r.text)
-
- r = requests.post(_url(ctnr("/containers/{}/stop")))
- self.assertIn(r.status_code, (204, 304), r.text)
-
- def test_start_containers(self):
- r = requests.post(_url(ctnr("/containers/{}/stop")))
- self.assertIn(r.status_code, (204, 304), r.text)
-
- r = requests.post(_url(ctnr("/containers/{}/start")))
- self.assertIn(r.status_code, (204, 304), r.text)
-
- def test_restart_containers(self):
- r = requests.post(_url(ctnr("/containers/{}/start")))
- self.assertIn(r.status_code, (204, 304), r.text)
-
- r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
- self.assertEqual(r.status_code, 204, r.text)
-
- def test_resize(self):
- r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
- self.assertIn(r.status_code, (200, 409), r.text)
- if r.status_code == 200:
- self.assertEqual(r.text, "", r.text)
-
- def test_attach_containers(self):
- self.skipTest("FIXME: Test timeouts")
- r = requests.post(_url(ctnr("/containers/{}/attach")), timeout=5)
- self.assertIn(r.status_code, (101, 500), r.text)
-
- def test_logs_containers(self):
- r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
- self.assertEqual(r.status_code, 200, r.text)
-
- # TODO Need to support Docker-py order of network/container creates
- def test_post_create_compat_connect(self):
- """Create network and container then connect to network"""
- net_default = requests.post(
- PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestDefaultNetwork"}
- )
- self.assertEqual(net_default.status_code, 201, net_default.text)
-
- create = requests.post(
- PODMAN_URL + "/v1.40/containers/create?name=postCreateConnect",
- json={
- "Cmd": ["top"],
- "Image": "alpine:latest",
- "NetworkDisabled": False,
- # FIXME adding these 2 lines cause: (This is sampled from docker-py)
- # "network already exists","message":"container
- # 01306e499df5441560d70071a54342611e422a94de20865add50a9565fd79fb9 is already connected to CNI
- # network \"TestDefaultNetwork\": network already exists"
- # "HostConfig": {"NetworkMode": "TestDefaultNetwork"},
- # "NetworkingConfig": {"EndpointsConfig": {"TestDefaultNetwork": None}},
- # FIXME These two lines cause:
- # CNI network \"TestNetwork\" not found","message":"error configuring network namespace for container
- # 369ddfa7d3211ebf1fbd5ddbff91bd33fa948858cea2985c133d6b6507546dff: CNI network \"TestNetwork\" not
- # found"
- # "HostConfig": {"NetworkMode": "TestNetwork"},
- # "NetworkingConfig": {"EndpointsConfig": {"TestNetwork": None}},
- # FIXME no networking defined cause: (note this error is from the container inspect below)
- # "internal libpod error","message":"network inspection mismatch: asked to join 2 CNI network(s) [
- # TestDefaultNetwork podman], but have information on 1 network(s): internal libpod error"
- },
- )
- self.assertEqual(create.status_code, 201, create.text)
- payload = json.loads(create.text)
- self.assertIsNotNone(payload["Id"])
-
- start = requests.post(PODMAN_URL + f"/v1.40/containers/{payload['Id']}/start")
- self.assertEqual(start.status_code, 204, start.text)
-
- connect = requests.post(
- PODMAN_URL + "/v1.40/networks/TestDefaultNetwork/connect",
- json={"Container": payload["Id"]},
- )
- self.assertEqual(connect.status_code, 200, connect.text)
- self.assertEqual(connect.text, "OK\n")
-
- inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
- self.assertEqual(inspect.status_code, 200, inspect.text)
-
- payload = json.loads(inspect.text)
- self.assertFalse(payload["Config"].get("NetworkDisabled", False))
-
- self.assertEqual(
- "TestDefaultNetwork",
- payload["NetworkSettings"]["Networks"]["TestDefaultNetwork"]["NetworkID"],
- )
- # TODO restore this to test, when joining multiple networks possible
- # self.assertEqual(
- # "TestNetwork",
- # payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
- # )
- # TODO Need to support network aliases
- # self.assertIn(
- # "test_post_create",
- # payload["NetworkSettings"]["Networks"]["TestNetwork"]["Aliases"],
- # )
-
- def test_post_create_compat(self):
- """Create network and connect container during create"""
- net = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"})
- self.assertEqual(net.status_code, 201, net.text)
-
- create = requests.post(
- PODMAN_URL + "/v1.40/containers/create?name=postCreate",
- json={
- "Cmd": ["date"],
- "Image": "alpine:latest",
- "NetworkDisabled": False,
- "HostConfig": {"NetworkMode": "TestNetwork"},
- },
- )
- self.assertEqual(create.status_code, 201, create.text)
- payload = json.loads(create.text)
- self.assertIsNotNone(payload["Id"])
-
- inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
- self.assertEqual(inspect.status_code, 200, inspect.text)
- payload = json.loads(inspect.text)
- self.assertFalse(payload["Config"].get("NetworkDisabled", False))
- self.assertEqual(
- "TestNetwork",
- payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
- )
-
- def test_commit(self):
- r = requests.post(_url(ctnr("/commit?container={}")))
- self.assertEqual(r.status_code, 200, r.text)
-
- obj = json.loads(r.content)
- self.assertIsInstance(obj, dict)
- self.assertIn("Id", obj)
-
- def test_images_compat(self):
- r = requests.get(PODMAN_URL + "/v1.40/images/json")
- self.assertEqual(r.status_code, 200, r.text)
-
- # See https://docs.docker.com/engine/api/v1.40/#operation/ImageList
- required_keys = (
- "Id",
- "ParentId",
- "RepoTags",
- "RepoDigests",
- "Created",
- "Size",
- "SharedSize",
- "VirtualSize",
- "Labels",
- "Containers",
- )
- objs = json.loads(r.content)
- self.assertIn(type(objs), (list,))
- for o in objs:
- self.assertIsInstance(o, dict)
- for k in required_keys:
- self.assertIn(k, o)
-
- def test_inspect_image_compat(self):
- r = requests.get(PODMAN_URL + "/v1.40/images/alpine/json")
- self.assertEqual(r.status_code, 200, r.text)
-
- # See https://docs.docker.com/engine/api/v1.40/#operation/ImageInspect
- required_keys = (
- "Id",
- "Parent",
- "Comment",
- "Created",
- "Container",
- "DockerVersion",
- "Author",
- "Architecture",
- "Os",
- "Size",
- "VirtualSize",
- "GraphDriver",
- "RootFS",
- "Metadata",
- )
-
- obj = json.loads(r.content)
- self.assertIn(type(obj), (dict,))
- for k in required_keys:
- self.assertIn(k, obj)
- _ = parse(obj["Created"])
-
- def test_delete_image_compat(self):
- r = requests.delete(PODMAN_URL + "/v1.40/images/alpine?force=true")
- self.assertEqual(r.status_code, 200, r.text)
- obj = json.loads(r.content)
- self.assertIn(type(obj), (list,))
-
- def test_pull(self):
- r = requests.post(_url("/images/pull?reference=alpine"), timeout=15)
- self.assertEqual(r.status_code, 200, r.status_code)
- text = r.text
- keys = {
- "error": False,
- "id": False,
- "images": False,
- "stream": False,
- }
- # Read and record stanza's from pull
- for line in str.splitlines(text):
- obj = json.loads(line)
- key_list = list(obj.keys())
- for k in key_list:
- keys[k] = True
-
- self.assertFalse(keys["error"], "Expected no errors")
- self.assertTrue(keys["id"], "Expected to find id stanza")
- self.assertTrue(keys["images"], "Expected to find images stanza")
- self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")
-
- def test_search_compat(self):
- url = PODMAN_URL + "/v1.40/images/search"
-
- # Had issues with this test hanging when repositories not happy
- def do_search1():
- payload = {"term": "alpine"}
- r = requests.get(url, params=payload, timeout=5)
- self.assertEqual(r.status_code, 200, r.text)
- objs = json.loads(r.text)
- self.assertIn(type(objs), (list,))
-
- def do_search2():
- payload = {"term": "alpine", "limit": 1}
- r = requests.get(url, params=payload, timeout=5)
- self.assertEqual(r.status_code, 200, r.text)
- objs = json.loads(r.text)
- self.assertIn(type(objs), (list,))
- self.assertEqual(len(objs), 1)
-
- def do_search3():
- payload = {"term": "alpine", "filters": '{"is-official":["true"]}'}
- r = requests.get(url, params=payload, timeout=5)
- self.assertEqual(r.status_code, 200, r.text)
- objs = json.loads(r.text)
- self.assertIn(type(objs), (list,))
- # There should be only one official image
- self.assertEqual(len(objs), 1)
-
- def do_search4():
- headers = {"X-Registry-Auth": "null"}
- payload = {"term": "alpine"}
- r = requests.get(url, params=payload, headers=headers, timeout=5)
- self.assertEqual(r.status_code, 200, r.text)
-
- def do_search5():
- headers = {"X-Registry-Auth": "invalid value"}
- payload = {"term": "alpine"}
- r = requests.get(url, params=payload, headers=headers, timeout=5)
- self.assertEqual(r.status_code, 400, r.text)
-
- search_methods = [do_search1, do_search2, do_search3, do_search4, do_search5]
- for search_method in search_methods:
- search = Process(target=search_method)
- search.start()
- search.join(timeout=10)
- self.assertFalse(search.is_alive(), "/images/search took too long")
-
- def test_ping(self):
- required_headers = (
- "API-Version",
- "Builder-Version",
- "Docker-Experimental",
- "Cache-Control",
- "Pragma",
- "Pragma",
- )
-
- def check_headers(req):
- for k in required_headers:
- self.assertIn(k, req.headers)
-
- r = requests.get(PODMAN_URL + "/_ping")
- self.assertEqual(r.status_code, 200, r.text)
- self.assertEqual(r.text, "OK")
- check_headers(r)
-
- r = requests.head(PODMAN_URL + "/_ping")
- self.assertEqual(r.status_code, 200, r.text)
- self.assertEqual(r.text, "")
- check_headers(r)
-
- r = requests.get(_url("/_ping"))
- self.assertEqual(r.status_code, 200, r.text)
- self.assertEqual(r.text, "OK")
- check_headers(r)
-
- r = requests.head(_url("/_ping"))
- self.assertEqual(r.status_code, 200, r.text)
- self.assertEqual(r.text, "")
- check_headers(r)
-
- def test_history_compat(self):
- r = requests.get(PODMAN_URL + "/v1.40/images/alpine/history")
- self.assertEqual(r.status_code, 200, r.text)
-
- # See https://docs.docker.com/engine/api/v1.40/#operation/ImageHistory
- required_keys = ("Id", "Created", "CreatedBy", "Tags", "Size", "Comment")
-
- objs = json.loads(r.content)
- self.assertIn(type(objs), (list,))
- for o in objs:
- self.assertIsInstance(o, dict)
- for k in required_keys:
- self.assertIn(k, o)
-
- def test_network_compat(self):
- name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
-
- # Cannot test for 0 existing networks because default "podman" network always exists
-
- create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": name})
- self.assertEqual(create.status_code, 201, create.content)
- obj = json.loads(create.content)
- self.assertIn(type(obj), (dict,))
- self.assertIn("Id", obj)
- ident = obj["Id"]
- self.assertNotEqual(name, ident)
-
- ls = requests.get(PODMAN_URL + "/v1.40/networks")
- self.assertEqual(ls.status_code, 200, ls.content)
- objs = json.loads(ls.content)
- self.assertIn(type(objs), (list,))
-
- found = False
- for network in objs:
- if network["Name"] == name:
- found = True
- self.assertTrue(found, f"Network {name} not found")
-
- inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
- self.assertEqual(inspect.status_code, 200, inspect.content)
- obj = json.loads(create.content)
- self.assertIn(type(obj), (dict,))
-
- inspect = requests.delete(PODMAN_URL + f"/v1.40/networks/{ident}")
- self.assertEqual(inspect.status_code, 204, inspect.content)
- inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
- self.assertEqual(inspect.status_code, 404, inspect.content)
-
- # network prune
- prune_name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
- prune_create = requests.post(
- PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name}
- )
- self.assertEqual(create.status_code, 201, prune_create.content)
-
- prune = requests.post(PODMAN_URL + "/v1.40/networks/prune")
- self.assertEqual(prune.status_code, 200, prune.content)
- obj = json.loads(prune.content)
- self.assertTrue(prune_name in obj["NetworksDeleted"])
-
- def test_volumes_compat(self):
- name = "Volume_" + "".join(random.choice(string.ascii_letters) for i in range(10))
-
- ls = requests.get(PODMAN_URL + "/v1.40/volumes")
- self.assertEqual(ls.status_code, 200, ls.content)
-
- # See https://docs.docker.com/engine/api/v1.40/#operation/VolumeList
- required_keys = (
- "Volumes",
- "Warnings",
- )
-
- obj = json.loads(ls.content)
- self.assertIn(type(obj), (dict,))
- for k in required_keys:
- self.assertIn(k, obj)
-
- create = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
- self.assertEqual(create.status_code, 201, create.content)
-
- # See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate
- # and https://docs.docker.com/engine/api/v1.40/#operation/VolumeInspect
- required_keys = (
- "Name",
- "Driver",
- "Mountpoint",
- "Labels",
- "Scope",
- "Options",
- )
-
- obj = json.loads(create.content)
- self.assertIn(type(obj), (dict,))
- for k in required_keys:
- self.assertIn(k, obj)
- self.assertEqual(obj["Name"], name)
-
- inspect = requests.get(PODMAN_URL + f"/v1.40/volumes/{name}")
- self.assertEqual(inspect.status_code, 200, inspect.content)
-
- obj = json.loads(create.content)
- self.assertIn(type(obj), (dict,))
- for k in required_keys:
- self.assertIn(k, obj)
-
- rm = requests.delete(PODMAN_URL + f"/v1.40/volumes/{name}")
- self.assertEqual(rm.status_code, 204, rm.content)
-
- # recreate volume with data and then prune it
- r = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
- self.assertEqual(create.status_code, 201, create.content)
- create = json.loads(r.content)
- with open(os.path.join(create["Mountpoint"], "test_prune"), "w") as file:
- file.writelines(["This is a test\n", "This is a good test\n"])
-
- prune = requests.post(PODMAN_URL + "/v1.40/volumes/prune")
- self.assertEqual(prune.status_code, 200, prune.content)
- payload = json.loads(prune.content)
- self.assertIn(name, payload["VolumesDeleted"])
- self.assertGreater(payload["SpaceReclaimed"], 0)
-
- def test_version(self):
- r = requests.get(PODMAN_URL + "/v1.40/version")
- self.assertEqual(r.status_code, 200, r.content)
-
- r = requests.get(_url("/version"))
- self.assertEqual(r.status_code, 200, r.content)
-
- def test_df_compat(self):
- r = requests.get(PODMAN_URL + "/v1.40/system/df")
- self.assertEqual(r.status_code, 200, r.content)
-
- obj = json.loads(r.content)
- self.assertIn("Images", obj)
- self.assertIn("Containers", obj)
- self.assertIn("Volumes", obj)
- self.assertIn("BuildCache", obj)
-
- def test_prune_compat(self):
- name = "Ctnr_" + "".join(random.choice(string.ascii_letters) for i in range(10))
-
- r = requests.post(
- PODMAN_URL + f"/v1.40/containers/create?name={name}",
- json={
- "Cmd": ["cp", "/etc/motd", "/motd.size_test"],
- "Image": "alpine:latest",
- "NetworkDisabled": True,
- },
- )
- self.assertEqual(r.status_code, 201, r.text)
- create = json.loads(r.text)
-
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{create['Id']}/start")
- self.assertEqual(r.status_code, 204, r.text)
-
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{create['Id']}/wait")
- self.assertEqual(r.status_code, 200, r.text)
- wait = json.loads(r.text)
- self.assertEqual(wait["StatusCode"], 0, wait["Error"]["Message"])
-
- prune = requests.post(PODMAN_URL + "/v1.40/containers/prune")
- self.assertEqual(prune.status_code, 200, prune.status_code)
- prune_payload = json.loads(prune.text)
- self.assertGreater(prune_payload["SpaceReclaimed"], 0)
- self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
-
- # Delete any orphaned containers
- r = requests.get(PODMAN_URL + "/v1.40/containers/json?all=true")
- self.assertEqual(r.status_code, 200, r.text)
- for ctnr in json.loads(r.text):
- requests.delete(PODMAN_URL + f"/v1.40/containers/{ctnr['Id']}?force=true")
-
- prune = requests.post(PODMAN_URL + "/v1.40/images/prune")
- self.assertEqual(prune.status_code, 200, prune.text)
- prune_payload = json.loads(prune.text)
- self.assertGreater(prune_payload["SpaceReclaimed"], 0)
-
- # FIXME need method to determine which image is going to be "pruned" to fix test
- # TODO should handler be recursive when deleting images?
- # self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
-
- # FIXME (@vrothberg): I commented this line out during the `libimage` migration.
- # It doesn't make sense to report anything to be deleted if the reclaimed space
- # is zero. I think the test needs some rewrite.
- # self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])
-
- def test_status_compat(self):
- r = requests.post(
- PODMAN_URL + "/v1.40/containers/create?name=topcontainer",
- json={"Cmd": ["top"], "Image": "alpine:latest"},
- )
- self.assertEqual(r.status_code, 201, r.text)
- payload = json.loads(r.text)
- container_id = payload["Id"]
- self.assertIsNotNone(container_id)
-
- r = requests.get(
- PODMAN_URL + "/v1.40/containers/json",
- params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
- )
- self.assertEqual(r.status_code, 200, r.text)
- payload = json.loads(r.text)
- self.assertEqual(payload[0]["Status"], "Created")
-
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/start")
- self.assertEqual(r.status_code, 204, r.text)
-
- r = requests.get(
- PODMAN_URL + "/v1.40/containers/json",
- params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
- )
- self.assertEqual(r.status_code, 200, r.text)
- payload = json.loads(r.text)
- self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
-
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/pause")
- self.assertEqual(r.status_code, 204, r.text)
-
- r = requests.get(
- PODMAN_URL + "/v1.40/containers/json",
- params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
- )
- self.assertEqual(r.status_code, 200, r.text)
- payload = json.loads(r.text)
- self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
- self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
-
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/unpause")
- self.assertEqual(r.status_code, 204, r.text)
- r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/stop")
- self.assertEqual(r.status_code, 204, r.text)
-
- r = requests.get(
- PODMAN_URL + "/v1.40/containers/json",
- params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
- )
- self.assertEqual(r.status_code, 200, r.text)
- payload = json.loads(r.text)
- self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
-
- r = requests.delete(PODMAN_URL + f"/v1.40/containers/{container_id}")
- self.assertEqual(r.status_code, 204, r.text)
-
- def test_pod_start_conflict(self):
- """Verify issue #8865"""
-
- pod_name = list()
- pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
- pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
-
- r = requests.post(
- _url("/pods/create"),
- json={
- "name": pod_name[0],
- "no_infra": False,
- "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
- },
- )
- self.assertEqual(r.status_code, 201, r.text)
- r = requests.post(
- _url("/containers/create"),
- json={
- "pod": pod_name[0],
- "image": "docker.io/alpine:latest",
- "command": ["top"],
- },
- )
- self.assertEqual(r.status_code, 201, r.text)
-
- r = requests.post(
- _url("/pods/create"),
- json={
- "name": pod_name[1],
- "no_infra": False,
- "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
- },
- )
- self.assertEqual(r.status_code, 201, r.text)
- r = requests.post(
- _url("/containers/create"),
- json={
- "pod": pod_name[1],
- "image": "docker.io/alpine:latest",
- "command": ["top"],
- },
- )
- self.assertEqual(r.status_code, 201, r.text)
-
- r = requests.post(_url(f"/pods/{pod_name[0]}/start"))
- self.assertEqual(r.status_code, 200, r.text)
-
- r = requests.post(_url(f"/pods/{pod_name[1]}/start"))
- self.assertEqual(r.status_code, 409, r.text)
-
- start = json.loads(r.text)
- self.assertGreater(len(start["Errs"]), 0, r.text)
-
- def test_manifest_409(self):
- r = requests.post(_url("/manifests/create"), params={"name": "ThisIsAnInvalidImage"})
- self.assertEqual(r.status_code, 400, r.text)
-
- def test_df(self):
- r = requests.get(_url("/system/df"))
- self.assertEqual(r.status_code, 200, r.text)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go
index 340ea31f3..2cf552274 100644
--- a/test/e2e/create_staticip_test.go
+++ b/test/e2e/create_staticip_test.go
@@ -60,8 +60,10 @@ var _ = Describe("Podman create with --ip flag", func() {
})
It("Podman create with specified static IP has correct IP", func() {
+ // NOTE: we force the k8s-file log driver to make sure the
+ // tests are passing inside a container.
ip := GetRandomIPAddress()
- result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", ip, ALPINE, "ip", "addr"})
+ result := podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--ip", ip, ALPINE, "ip", "addr"})
result.WaitWithDefaultTimeout()
// Rootless static ip assignment without network should error
if rootless.IsRootless() {
@@ -83,10 +85,10 @@ var _ = Describe("Podman create with --ip flag", func() {
It("Podman create two containers with the same IP", func() {
SkipIfRootless("--ip not supported without network in rootless mode")
ip := GetRandomIPAddress()
- result := podmanTest.Podman([]string{"create", "--name", "test1", "--ip", ip, ALPINE, "sleep", "999"})
+ result := podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test1", "--ip", ip, ALPINE, "sleep", "999"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- result = podmanTest.Podman([]string{"create", "--name", "test2", "--ip", ip, ALPINE, "ip", "addr"})
+ result = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test2", "--ip", ip, ALPINE, "ip", "addr"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
result = podmanTest.Podman([]string{"start", "test1"})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 1f1786dbe..e4db6b845 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -160,9 +160,12 @@ var _ = Describe("Podman create", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("skip failing test on ppc64le")
}
+ // NOTE: we force the k8s-file log driver to make sure the
+ // tests are passing inside a container.
+
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
os.Mkdir(mountPath, 0755)
- session := podmanTest.Podman([]string{"create", "--name", "test", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session := podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test"})
@@ -173,7 +176,7 @@ var _ = Describe("Podman create", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/create/test rw"))
- session = podmanTest.Podman([]string{"create", "--name", "test_ro", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,ro", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test_ro", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,ro", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_ro"})
@@ -184,7 +187,7 @@ var _ = Describe("Podman create", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/create/test ro"))
- session = podmanTest.Podman([]string{"create", "--name", "test_shared", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,shared", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test_shared", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test,shared", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_shared"})
@@ -200,7 +203,7 @@ var _ = Describe("Podman create", func() {
mountPath = filepath.Join(podmanTest.TempDir, "scratchpad")
os.Mkdir(mountPath, 0755)
- session = podmanTest.Podman([]string{"create", "--name", "test_tmpfs", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test_tmpfs", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"start", "test_tmpfs"})
diff --git a/test/e2e/info_test.go b/test/e2e/info_test.go
index 0b112b312..60136bcc2 100644
--- a/test/e2e/info_test.go
+++ b/test/e2e/info_test.go
@@ -124,4 +124,15 @@ var _ = Describe("Podman Info", func() {
}
})
+ It("verify ServiceIsRemote", func() {
+ session := podmanTest.Podman([]string{"info", "--format", "{{.Host.ServiceIsRemote}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+
+ if podmanTest.RemoteTest {
+ Expect(session.OutputToString()).To(ContainSubstring("true"))
+ } else {
+ Expect(session.OutputToString()).To(ContainSubstring("false"))
+ }
+ })
})
diff --git a/test/e2e/network_connect_disconnect_test.go b/test/e2e/network_connect_disconnect_test.go
index 6974c7614..c82aacbe4 100644
--- a/test/e2e/network_connect_disconnect_test.go
+++ b/test/e2e/network_connect_disconnect_test.go
@@ -66,7 +66,7 @@ var _ = Describe("Podman network connect and disconnect", func() {
con := podmanTest.Podman([]string{"network", "disconnect", netName, "test"})
con.WaitWithDefaultTimeout()
Expect(con.ExitCode()).ToNot(BeZero())
- Expect(con.ErrorToString()).To(ContainSubstring(`network mode "slirp4netns" is not supported`))
+ Expect(con.ErrorToString()).To(ContainSubstring(`"slirp4netns" is not supported: invalid network mode`))
})
It("podman network disconnect", func() {
@@ -132,7 +132,7 @@ var _ = Describe("Podman network connect and disconnect", func() {
con := podmanTest.Podman([]string{"network", "connect", netName, "test"})
con.WaitWithDefaultTimeout()
Expect(con.ExitCode()).ToNot(BeZero())
- Expect(con.ErrorToString()).To(ContainSubstring(`network mode "slirp4netns" is not supported`))
+ Expect(con.ErrorToString()).To(ContainSubstring(`"slirp4netns" is not supported: invalid network mode`))
})
It("podman connect on a container that already is connected to the network should error", func() {
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index 38f893a43..419748adb 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -88,6 +88,53 @@ var _ = Describe("Podman prune", func() {
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
})
+ It("podman image prune - remove only dangling images", func() {
+ session := podmanTest.Podman([]string{"images", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ hasNone, _ := session.GrepString("<none>")
+ Expect(hasNone).To(BeFalse())
+ numImages := len(session.OutputToStringArray())
+
+ // Since there's no dangling image, none should be removed.
+ session = podmanTest.Podman([]string{"image", "prune", "-f"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+
+ // Let's be extra sure that the same number of images is
+ // reported.
+ session = podmanTest.Podman([]string{"images", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(numImages))
+
+ // Now build a new image with dangling intermediate images.
+ podmanTest.BuildImage(pruneImage, "alpine_bash:latest", "true")
+
+ session = podmanTest.Podman([]string{"images", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ hasNone, _ = session.GrepString("<none>")
+ Expect(hasNone).To(BeTrue()) // ! we have dangling ones
+ numImages = len(session.OutputToStringArray())
+
+ // Since there's at least one dangling image, prune should
+ // remove them.
+ session = podmanTest.Podman([]string{"image", "prune", "-f"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ numPrunedImages := len(session.OutputToStringArray())
+ Expect(numPrunedImages >= 1).To(BeTrue())
+
+ // Now make sure that exactly the number of pruned images has
+ // been removed.
+ session = podmanTest.Podman([]string{"images", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(numImages - numPrunedImages))
+ })
+
It("podman image prune skip cache images", func() {
podmanTest.BuildImage(pruneImage, "alpine_bash:latest", "true")
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 4c66e2823..37e837b1d 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -649,11 +649,13 @@ var _ = Describe("Podman run networking", func() {
defer podmanTest.removeCNINetwork(netName)
name := "nc-server"
- run := podmanTest.Podman([]string{"run", "-d", "--name", name, "--net", netName, ALPINE, "nc", "-l", "-p", "8080"})
+ run := podmanTest.Podman([]string{"run", "--log-driver", "k8s-file", "-d", "--name", name, "--net", netName, ALPINE, "nc", "-l", "-p", "8080"})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(Equal(0))
- run = podmanTest.Podman([]string{"run", "--rm", "--net", netName, "--uidmap", "0:1:4096", ALPINE, "sh", "-c", fmt.Sprintf("echo podman | nc -w 1 %s.dns.podman 8080", name)})
+ // NOTE: we force the k8s-file log driver to make sure the
+ // tests are passing inside a container.
+ run = podmanTest.Podman([]string{"run", "--log-driver", "k8s-file", "--rm", "--net", netName, "--uidmap", "0:1:4096", ALPINE, "sh", "-c", fmt.Sprintf("echo podman | nc -w 1 %s.dns.podman 8080", name)})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(Equal(0))
diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go
index 6abe152a9..2886f06c1 100644
--- a/test/e2e/run_selinux_test.go
+++ b/test/e2e/run_selinux_test.go
@@ -343,4 +343,12 @@ var _ = Describe("Podman run", func() {
session.WaitWithDefaultTimeout()
Expect(session.OutputToString()).To(ContainSubstring("container_init_t"))
})
+
+ It("podman relabels named volume with :Z", func() {
+ session := podmanTest.Podman([]string{"run", "-v", "testvol:/test1/test:Z", fedoraMinimal, "ls", "-alZ", "/test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ match, _ := session.GrepString(":s0:")
+ Expect(match).Should(BeTrue())
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 59220cf01..58538b689 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -712,7 +712,7 @@ USER bin`, BB)
It("podman run log-opt", func() {
log := filepath.Join(podmanTest.TempDir, "/container.log")
- session := podmanTest.Podman([]string{"run", "--rm", "--log-opt", fmt.Sprintf("path=%s", log), ALPINE, "ls"})
+ session := podmanTest.Podman([]string{"run", "--rm", "--log-driver", "k8s-file", "--log-opt", fmt.Sprintf("path=%s", log), ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
_, err := os.Stat(log)
@@ -1669,6 +1669,49 @@ WORKDIR /madethis`, BB)
Expect(session.OutputToString()).To(Equal(secretsString))
})
+ It("podman run --secret mount with uid, gid, mode options", func() {
+ secretsString := "somesecretdata"
+ secretFilePath := filepath.Join(podmanTest.TempDir, "secret")
+ err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // check default permissions
+ session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "secr", ALPINE, "ls", "-l", "/run/secrets/mysecret"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ output := session.OutputToString()
+ Expect(output).To(ContainSubstring("-r--r--r--"))
+ Expect(output).To(ContainSubstring("root"))
+
+ session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=mount,uid=1000,gid=1001,mode=777", "--name", "secr2", ALPINE, "ls", "-ln", "/run/secrets/mysecret"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ output = session.OutputToString()
+ Expect(output).To(ContainSubstring("-rwxrwxrwx"))
+ Expect(output).To(ContainSubstring("1000"))
+ Expect(output).To(ContainSubstring("1001"))
+ })
+
+ It("podman run --secret with --user", func() {
+ secretsString := "somesecretdata"
+ secretFilePath := filepath.Join(podmanTest.TempDir, "secret")
+ err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "nonroot", "--user", "200:200", ALPINE, "cat", "/run/secrets/mysecret"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal(secretsString))
+ })
+
It("podman run invalid secret option", func() {
secretsString := "somesecretdata"
secretFilePath := filepath.Join(podmanTest.TempDir, "secret")
@@ -1694,6 +1737,11 @@ WORKDIR /madethis`, BB)
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
+ // mount option with env type
+ session = podmanTest.Podman([]string{"run", "--secret", "source=mysecret,type=env,uid=1000", "--name", "secr", ALPINE, "printenv", "mysecret"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
// No source given
session = podmanTest.Podman([]string{"run", "--secret", "type=env", "--name", "secr", ALPINE, "printenv", "mysecret"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/toolbox_test.go b/test/e2e/toolbox_test.go
index 986f856bf..16300bebc 100644
--- a/test/e2e/toolbox_test.go
+++ b/test/e2e/toolbox_test.go
@@ -215,7 +215,7 @@ var _ = Describe("Toolbox-specific testing", func() {
useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s",
homeDir, shell, uid, username)
passwd := fmt.Sprintf("passwd --delete %s", username)
- session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
fmt.Sprintf("%s; %s; echo READY; sleep 1000", useradd, passwd)})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -250,7 +250,7 @@ var _ = Describe("Toolbox-specific testing", func() {
groupadd := fmt.Sprintf("groupadd --gid %s %s", gid, groupName)
- session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
fmt.Sprintf("%s; echo READY; sleep 1000", groupadd)})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -294,7 +294,7 @@ var _ = Describe("Toolbox-specific testing", func() {
usermod := fmt.Sprintf("usermod --append --groups wheel --home %s --shell %s --uid %s --gid %s %s",
homeDir, shell, uid, gid, username)
- session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
fmt.Sprintf("%s; %s; %s; echo READY; sleep 1000", useradd, groupadd, usermod)})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -339,6 +339,7 @@ var _ = Describe("Toolbox-specific testing", func() {
// These should be most of the switches that Toolbox uses to create a "toolbox" container
// https://github.com/containers/toolbox/blob/master/src/cmd/create.go
session = podmanTest.Podman([]string{"create",
+ "--log-driver", "k8s-file",
"--dns", "none",
"--hostname", "toolbox",
"--ipc", "host",
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 9a136ff13..e12c32ef5 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -690,4 +690,18 @@ json-file | f
run_podman rm $cid
}
+@test "podman run no /etc/mtab " {
+ tmpdir=$PODMAN_TMPDIR/build-test
+ mkdir -p $tmpdir
+
+ cat >$tmpdir/Dockerfile <<EOF
+FROM $IMAGE
+RUN rm /etc/mtab
+EOF
+ expected="'/etc/mtab' -> '/proc/mounts'"
+ run_podman build -t nomtab $tmpdir
+ run_podman run --rm nomtab stat -c %N /etc/mtab
+ is "$output" "$expected" "/etc/mtab should be created"
+}
+
# vim: filetype=sh
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index bac153b8e..3dd88e5eb 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -27,13 +27,22 @@ load helpers
run_podman rm $cid
}
-@test "podman logs - multi" {
+function _log_test_multi() {
+ local driver=$1
+
skip_if_remote "logs does not support multiple containers when run remotely"
+ # Under k8s file, 'podman logs' returns just the facts, Ma'am.
+ # Under journald, there may be other cruft (e.g. container removals)
+ local etc=
+ if [[ $driver =~ journal ]]; then
+ etc='.*'
+ fi
+
# Simple helper to make the container starts, below, easier to read
local -a cid
doit() {
- run_podman run --rm -d --name "$1" $IMAGE sh -c "$2";
+ run_podman run --log-driver=$driver --rm -d --name "$1" $IMAGE sh -c "$2";
cid+=($(echo "${output:0:12}"))
}
@@ -47,24 +56,21 @@ load helpers
run_podman logs -f c1 c2
is "$output" \
- "${cid[0]} a
-${cid[1]} b
-${cid[1]} c
+ "${cid[0]} a$etc
+${cid[1]} b$etc
+${cid[1]} c$etc
${cid[0]} d" "Sequential output from logs"
}
-@test "podman logs over journald" {
+@test "podman logs - multi k8s-file" {
+ _log_test_multi k8s-file
+}
+
+@test "podman logs - multi journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
- msg=$(random_string 20)
-
- run_podman run --name myctr --log-driver journald $IMAGE echo $msg
-
- run_podman logs myctr
- is "$output" "$msg" "check that log output equals the container output"
-
- run_podman rm myctr
+ _log_test_multi journald
}
# vim: filetype=sh
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index a2c8ae588..d2d56c051 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -393,9 +393,9 @@ Labels.$label_name | $label_value
"image tree: third line"
is "${lines[3]}" "Image Layers" \
"image tree: fourth line"
- is "${lines[4]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[localhost/build_test:latest]" \
+ is "${lines[4]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[$IMAGE]" \
"image tree: first layer line"
- is "${lines[-1]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[$IMAGE]" \
+ is "${lines[-1]}" ".* ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[localhost/build_test:latest]" \
"image tree: last layer line"
# FIXME: 'image tree --whatrequires' does not work via remote
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index 3770eac27..1b02b4976 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -8,7 +8,8 @@ load helpers
@test "podman kill - test signal handling in containers" {
# Start a container that will handle all signals by emitting 'got: N'
local -a signals=(1 2 3 4 5 6 8 10 12 13 14 15 16 20 21 22 23 24 25 26 64)
- run_podman run -d $IMAGE sh -c \
+ # Force the k8s-file driver until #10323 is fixed.
+ run_podman run --log-driver=k8s-file -d $IMAGE sh -c \
"for i in ${signals[*]}; do trap \"echo got: \$i\" \$i; done;
echo READY;
while ! test -e /stop; do sleep 0.05; done;
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index 94980346e..1cec50827 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -88,8 +88,9 @@ load helpers
# Wait for container to restart
retries=20
while :;do
- run_podman '?' container inspect --format "{{.State.Pid}}" myweb
- if [[ $status -eq 0 ]]; then
+ run_podman container inspect --format "{{.State.Pid}}" myweb
+ # pid is 0 as long as the container is not running
+ if [[ $output -ne 0 ]]; then
if [[ $output == $pid ]]; then
die "This should never happen! Restarted container has same PID ($output) as killed one!"
fi
@@ -161,6 +162,27 @@ load helpers
done
}
+@test "podman run with slirp4ns assigns correct gateway address to host.containers.internal" {
+ CIDR="$(random_rfc1918_subnet)"
+ run_podman run --network slirp4netns:cidr="${CIDR}.0/24" \
+ $IMAGE grep 'host.containers.internal' /etc/hosts
+ is "$output" "${CIDR}.2 host.containers.internal" "host.containers.internal should be the cidr+2 address"
+}
+
+@test "podman run with slirp4ns adds correct dns address to resolv.conf" {
+ CIDR="$(random_rfc1918_subnet)"
+ run_podman run --network slirp4netns:cidr="${CIDR}.0/24" \
+ $IMAGE grep "${CIDR}" /etc/resolv.conf
+ is "$output" "nameserver ${CIDR}.3" "resolv.conf should have slirp4netns cidr+3 as a nameserver"
+}
+
+@test "podman run with slirp4ns assigns correct ip address container" {
+ CIDR="$(random_rfc1918_subnet)"
+ run_podman run --network slirp4netns:cidr="${CIDR}.0/24" \
+ $IMAGE sh -c "ip address | grep ${CIDR}"
+ is "$output" ".*inet ${CIDR}.100/24 \+" "container should have slirp4netns cidr+100 assigned to interface"
+}
+
# "network create" now works rootless, with the help of a special container
@test "podman network create" {
myport=54322
@@ -214,7 +236,6 @@ load helpers
@test "podman network reload" {
skip_if_remote "podman network reload does not have remote support"
- skip_if_rootless "podman network reload does not work rootless"
random_1=$(random_string 30)
HOST_PORT=12345
@@ -224,29 +245,42 @@ load helpers
INDEX1=$PODMAN_TMPDIR/hello.txt
echo $random_1 > $INDEX1
+ # use default network for root
+ local netname=podman
+ # for rootless we have to create a custom network since there is no default network
+ if is_rootless; then
+ netname=testnet-$(random_string 10)
+ run_podman network create $netname
+ is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'"
+ fi
+
# Bind-mount this file with a different name to a container running httpd
run_podman run -d --name myweb -p "$HOST_PORT:80" \
- -v $INDEX1:/var/www/index.txt \
- -w /var/www \
- $IMAGE /bin/busybox-extras httpd -f -p 80
+ --network $netname \
+ -v $INDEX1:/var/www/index.txt \
+ -w /var/www \
+ $IMAGE /bin/busybox-extras httpd -f -p 80
cid=$output
- run_podman inspect $cid --format "{{.NetworkSettings.IPAddress}}"
+ run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
ip="$output"
- run_podman inspect $cid --format "{{.NetworkSettings.MacAddress}}"
+ run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
mac="$output"
# Verify http contents: curl from localhost
run curl -s $SERVER/index.txt
is "$output" "$random_1" "curl 127.0.0.1:/index.txt"
- # flush the CNI iptables here
- run iptables -t nat -F CNI-HOSTPORT-DNAT
+ # rootless cannot modify iptables
+ if ! is_rootless; then
+ # flush the CNI iptables here
+ run iptables -t nat -F CNI-HOSTPORT-DNAT
- # check that we cannot curl (timeout after 5 sec)
- run timeout 5 curl -s $SERVER/index.txt
- if [ "$status" -ne 124 ]; then
- die "curl did not timeout, status code: $status"
+ # check that we cannot curl (timeout after 5 sec)
+ run timeout 5 curl -s $SERVER/index.txt
+ if [ "$status" -ne 124 ]; then
+ die "curl did not timeout, status code: $status"
+ fi
fi
# reload the network to recreate the iptables rules
@@ -254,9 +288,9 @@ load helpers
is "$output" "$cid" "Output does not match container ID"
# check that we still have the same mac and ip
- run_podman inspect $cid --format "{{.NetworkSettings.IPAddress}}"
+ run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
is "$output" "$ip" "IP address changed after podman network reload"
- run_podman inspect $cid --format "{{.NetworkSettings.MacAddress}}"
+ run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
is "$output" "$mac" "MAC address changed after podman network reload"
# check that we can still curl
@@ -274,6 +308,10 @@ load helpers
# cleanup the container
run_podman rm -f $cid
+
+ if is_rootless; then
+ run_podman network rm -f $netname
+ fi
}
@test "podman rootless cni adds /usr/sbin to PATH" {
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index 8fa96741c..bcd8cf939 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -51,18 +51,40 @@ spec:
seLinuxOptions:
level: "s0:c1,c2"
readOnlyRootFilesystem: false
+ volumeMounts:
+ - mountPath: /testdir:z
+ name: home-podman-testdir
workingDir: /
+ volumes:
+ - hostPath:
+ path: TESTDIR
+ type: Directory
+ name: home-podman-testdir
status: {}
"
+RELABEL="system_u:object_r:container_file_t:s0"
+
@test "podman play with stdin" {
- echo "$testYaml" > $PODMAN_TMPDIR/test.yaml
+ TESTDIR=$PODMAN_TMPDIR/testdir
+ mkdir -p $TESTDIR
+ echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
run_podman play kube - < $PODMAN_TMPDIR/test.yaml
+ if [ -e /usr/sbin/selinuxenabled -a /usr/sbin/selinuxenabled ]; then
+ run ls -Zd $TESTDIR
+ is "$output" ${RELABEL} "selinux relabel should have happened"
+ fi
run_podman pod rm -f test_pod
}
@test "podman play" {
- echo "$testYaml" > $PODMAN_TMPDIR/test.yaml
+ TESTDIR=$PODMAN_TMPDIR/testdir
+ mkdir -p $TESTDIR
+ echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
run_podman play kube $PODMAN_TMPDIR/test.yaml
+ if [ -e /usr/sbin/selinuxenabled -a /usr/sbin/selinuxenabled ]; then
+ run ls -Zd $TESTDIR
+ is "$output" ${RELABEL} "selinux relabel should have happened"
+ fi
run_podman pod rm -f test_pod
}
diff --git a/test/upgrade/test-upgrade.bats b/test/upgrade/test-upgrade.bats
index dd827b398..ca478e263 100644
--- a/test/upgrade/test-upgrade.bats
+++ b/test/upgrade/test-upgrade.bats
@@ -109,6 +109,8 @@ podman \$opts run -d --name myrunningcontainer --label mylabel=$LABEL_RUNNING \
-w /var/www \
$IMAGE /bin/busybox-extras httpd -f -p 80
+podman \$opts pod create --name mypod
+
echo READY
while :;do
if [ -e /stop ]; then
@@ -136,12 +138,18 @@ EOF
# pollute it for use by old-podman. We must keep that pristine
# so old-podman is the first to write to it.
#
+ # mount /etc/containers/storage.conf to use the same storage settings as on the host
+ # mount /dev/shm because the container locks are stored there
+ #
$PODMAN run -d --name podman_parent --pid=host \
--privileged \
--net=host \
--cgroupns=host \
+ --pid=host \
+ -v /etc/containers/storage.conf:/etc/containers/storage.conf \
-v /dev/fuse:/dev/fuse \
-v /run/crun:/run/crun \
+ -v /dev/shm:/dev/shm \
-v $pmroot:$pmroot \
$OLD_PODMAN $pmroot/setup
@@ -175,10 +183,11 @@ EOF
run_podman ps -a \
--format '{{.Names}}--{{.Status}}--{{.Ports}}--{{.Labels.mylabel}}' \
--sort=names
- is "${lines[0]}" "mycreatedcontainer--Created----$LABEL_CREATED" "created"
- is "${lines[1]}" "mydonecontainer--Exited (0).*----<no value>" "done"
- is "${lines[2]}" "myfailedcontainer--Exited (17) .*----$LABEL_FAILED" "fail"
- is "${lines[3]}" "myrunningcontainer--Up .*----$LABEL_RUNNING" "running"
+ is "${lines[0]}" ".*-infra--Created----<no value>" "infra container"
+ is "${lines[1]}" "mycreatedcontainer--Created----$LABEL_CREATED" "created"
+ is "${lines[2]}" "mydonecontainer--Exited (0).*----<no value>" "done"
+ is "${lines[3]}" "myfailedcontainer--Exited (17) .*----$LABEL_FAILED" "fail"
+ is "${lines[4]}" "myrunningcontainer--Up .*----$LABEL_RUNNING" "running"
# For debugging: dump containers and IDs
if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
@@ -206,9 +215,6 @@ failed | exited | 17
@test "logs" {
run_podman logs mydonecontainer
is "$output" "++$RANDOM_STRING_1++" "podman logs on stopped container"
-
-# run_podman logs myrunningcontainer
-# is "$output" "READY" "podman logs on running container"
}
@test "exec" {
@@ -226,45 +232,36 @@ failed | exited | 17
}
@test "pods" {
- skip "TBI"
+ run_podman pod inspect mypod
+ is "$output" ".*mypod.*"
+
+ run_podman --cgroup-manager=cgroupfs pod start mypod
+ is "$output" "[0-9a-f]\\{64\\}" "podman pod start"
+
+ run_podman pod ps
+ is "$output" ".*mypod.*" "podman pod ps shows name"
+ is "$output" ".*Running.*" "podman pod ps shows running state"
+
+ run_podman pod stop mypod
+ is "$output" "[0-9a-f]\\{64\\}" "podman pod stop"
+
+ run_podman --cgroup-manager=cgroupfs pod rm mypod
+ # FIXME: CI runs show this (non fatal) error:
+ # Error updating pod <ID> conmon cgroup PID limit: open /sys/fs/cgroup/libpod_parent/<ID>/conmon/pids.max: no such file or directory
+ # Investigate how to fix this (likely a race condition)
+ # Let's ignore the logrus messages for now
+ is "$output" ".*[0-9a-f]\\{64\\}" "podman pod rm"
}
# FIXME: commit? kill? network? pause? restart? top? volumes? What else?
@test "start" {
- skip "FIXME: this leaves a mount behind: root/overlay/sha/merged"
run_podman --cgroup-manager=cgroupfs start -a mydonecontainer
is "$output" "++$RANDOM_STRING_1++" "start on already-run container"
}
@test "rm a stopped container" {
- # FIXME FIXME FIXME!
- #
- # I have no idea what's going on here. For most of my testing in this
- # section, the code here was simply 'podman rm myfailedcontainer', and
- # it would succeed, but then way down, in 'cleanup' below, the 'rm -f'
- # step would fail:
- #
- # # podman rm -f podman_parent
- # error freeing lock for container <sha>: no such file or directory
- # ...where <sha> is the ID of the podman_parent container.
- #
- # I started playing with this section, by adding 'rm mydonecontainer',
- # and now it always fails, the same way, but with the container we're
- # removing right here:
- #
- # error freeing lock for container <sha>: no such file or directory
- # ...where <sha> is the ID of mydonecontainer.
- #
- # I don't know. I give up for now, and am skip'ing the whole thing.
- # If you want to play with it, try commenting out the 'myfailed' lines,
- # or just the 'mydone' ones, or, I don't know.
- skip "FIXME: error freeing lock for container <sha>: no such file or dir"
-
- # For debugging, so we can see what 'error freeing lock' refers to
- run_podman ps -a
-
run_podman rm myfailedcontainer
is "$output" "[0-9a-f]\\{64\\}" "podman rm myfailedcontainer"
@@ -274,12 +271,6 @@ failed | exited | 17
@test "stop and rm" {
- # About a ten-second pause, then:
- # Error: timed out waiting for file /tmp/pu.nf747w/tmp/exits/<sha>: internal libpod error
- # It doesn't seem to be a socket-length issue: the paths are ~80-88 chars.
- # Leaving podman_parent running, and exec'ing into it, it doesn't look
- # like the file is being written to the wrong place.
- skip "FIXME: this doesn't work: timed out waiting for file tmpdir/exits/sha"
run_podman stop myrunningcontainer
run_podman rm myrunningcontainer
}
@@ -304,7 +295,6 @@ failed | exited | 17
run_podman logs podman_parent
run_podman rm -f podman_parent
- # FIXME: why does this remain mounted?
umount $PODMAN_UPGRADE_WORKDIR/root/overlay || true
rm -rf $PODMAN_UPGRADE_WORKDIR