Diffstat (limited to 'test')
-rw-r--r--  test/apiv2/35-networks.at                        90
-rw-r--r--  test/apiv2/rest_api/__init__.py                   1
-rw-r--r--  test/apiv2/rest_api/test_rest_v2_0_0.py           2
-rw-r--r--  test/e2e/logs_test.go                            18
-rw-r--r--  test/e2e/network_create_test.go                  86
-rw-r--r--  test/e2e/network_test.go                         39
-rw-r--r--  test/e2e/play_kube_test.go                       18
-rw-r--r--  test/e2e/run_networking_test.go                   5
-rw-r--r--  test/e2e/search_test.go                           8
-rw-r--r--  test/python/docker/README.md                     38
-rw-r--r--  test/python/docker/__init__.py                  157
-rw-r--r--  test/python/docker/common.py                     21
-rw-r--r--  test/python/docker/constant.py                    6
-rw-r--r--  test/python/docker/test_containers.py           214
-rw-r--r--  test/python/docker/test_images.py               169
-rw-r--r--  test/python/docker/test_system.py                66
-rw-r--r--  test/python/dockerpy/README.md                   40
-rw-r--r--  test/python/dockerpy/__init__.py                  0
-rw-r--r--  test/python/dockerpy/tests/__init__.py            0
-rw-r--r--  test/python/dockerpy/tests/common.py            105
-rw-r--r--  test/python/dockerpy/tests/constant.py           13
-rw-r--r--  test/python/dockerpy/tests/test_containers.py   193
-rw-r--r--  test/python/dockerpy/tests/test_images.py       162
-rw-r--r--  test/python/dockerpy/tests/test_info_version.py  44
-rw-r--r--  test/system/030-run.bats                         10
-rw-r--r--  test/system/035-logs.bats                        10
26 files changed, 904 insertions(+), 611 deletions(-)
diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at
index 72c63207d..ad34511c7 100644
--- a/test/apiv2/35-networks.at
+++ b/test/apiv2/35-networks.at
@@ -6,52 +6,48 @@
t GET networks/non-existing-network 404 \
.cause='network not found'
-# FIXME FIXME FIXME: failing in CI. Deferring to someone else to fix later.
-#if root; then
-if false; then
- t POST libpod/networks/create?name=network1 '' 200 \
- .Filename~.*/network1\\.conflist
-
- # --data '{"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]}}'
- t POST libpod/networks/create?name=network2 '"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]}' 200 \
- .Filename~.*/network2\\.conflist
-
- # test for empty mask
- t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[]}' 500 \
- .cause~'.*cannot be empty'
- # test for invalid mask
- t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \
- .cause~'.*mask is invalid'
-
- # network list
- t GET libpod/networks/json 200
- t GET libpod/networks/json?filter=name=network1 200 \
- length=1 \
- .[0].Name=network1
- t GET networks 200
-
- #network list docker endpoint
- #filters={"name":["network1","network2"]}
- t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \
- length=2
- #filters={"name":["network"]}
- t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \
- length=2
- # invalid filter filters={"label":"abc"}
- t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 500 \
- .cause="only the name filter for listing networks is implemented"
- # invalid filter filters={"label":"abc","name":["network"]}
- t GET networks?filters=%7B%22label%22%3A%22abc%22%2C%22name%22%3A%5B%22network%22%5D%7D 500 \
- .cause="only the name filter for listing networks is implemented"
-
- # clean the network
- t DELETE libpod/networks/network1 200 \
- .[0].Name~network1 \
- .[0].Err=null
- t DELETE libpod/networks/network2 200 \
- .[0].Name~network2 \
- .[0].Err=null
-
-fi
+t POST libpod/networks/create?name=network1 '' 200 \
+.Filename~.*/network1\\.conflist
+
+# --data '{"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]}}'
+t POST libpod/networks/create?name=network2 '"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]}' 200 \
+.Filename~.*/network2\\.conflist
+
+# test for empty mask
+t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[]}' 500 \
+.cause~'.*cannot be empty'
+# test for invalid mask
+t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \
+.cause~'.*mask is invalid'
+
+# network list
+t GET libpod/networks/json 200
+t GET libpod/networks/json?filter=name=network1 200 \
+length=1 \
+.[0].Name=network1
+t GET networks 200
+
+#network list docker endpoint
+#filters={"name":["network1","network2"]}
+t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \
+length=2
+#filters={"name":["network"]}
+t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \
+length=2
+# invalid filter filters={"label":"abc"}
+t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 500 \
+.cause="only the name filter for listing networks is implemented"
+# invalid filter filters={"label":"abc","name":["network"]}
+t GET networks?filters=%7B%22label%22%3A%22abc%22%2C%22name%22%3A%5B%22network%22%5D%7D 500 \
+.cause="only the name filter for listing networks is implemented"
+
+# clean the network
+t DELETE libpod/networks/network1 200 \
+.[0].Name~network1 \
+.[0].Err=null
+t DELETE libpod/networks/network2 200 \
+.[0].Name~network2 \
+.[0].Err=null
+
# vim: filetype=sh
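
The docker-compatible `GET /networks` endpoint above receives its `filters` parameter as URL-encoded JSON, which is why the test passes literals like `%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D` for `filters={"name":["network1","network2"]}`. A minimal Python sketch of producing that encoding (the host, port, and path prefix below are assumptions, not taken from the test):

```python
import json
from urllib.parse import quote

# docker-style filters object: map of filter name -> list of values
filters = {"name": ["network1", "network2"]}

# Serialize compactly and percent-encode every reserved character.
encoded = quote(json.dumps(filters, separators=(",", ":")), safe="")

# Produces %7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D,
# matching the literal query string used in the test above.
url = f"http://localhost:8080/networks?filters={encoded}"  # host/port assumed
print(url)
```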
diff --git a/test/apiv2/rest_api/__init__.py b/test/apiv2/rest_api/__init__.py
index 5f0777d58..8100a4df5 100644
--- a/test/apiv2/rest_api/__init__.py
+++ b/test/apiv2/rest_api/__init__.py
@@ -3,7 +3,6 @@ import json
import os
import shutil
import subprocess
-import sys
import tempfile
diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py
index 5dfd1fc02..0ac4fde75 100644
--- a/test/apiv2/rest_api/test_rest_v2_0_0.py
+++ b/test/apiv2/rest_api/test_rest_v2_0_0.py
@@ -62,7 +62,7 @@ class TestApi(unittest.TestCase):
TestApi.podman = Podman()
TestApi.service = TestApi.podman.open(
- "system", "service", "tcp:localhost:8080", "--log-level=debug", "--time=0"
+ "system", "service", "tcp:localhost:8080", "--time=0"
)
# give the service some time to be ready...
time.sleep(2)
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 4214bd50e..a749a86ff 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -337,4 +337,22 @@ var _ = Describe("Podman logs", func() {
Expect(results).To(Exit(0))
Expect(results.OutputToString()).To(Equal("podman podman podman"))
})
+
+ It("Make sure logs match expected length", func() {
+ logc := podmanTest.Podman([]string{"run", "-t", "--name", "test", ALPINE, "sh", "-c", "echo 1; echo 2"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+
+ wait := podmanTest.Podman([]string{"wait", "test"})
+ wait.WaitWithDefaultTimeout()
+ Expect(wait).To(Exit(0))
+
+ results := podmanTest.Podman([]string{"logs", "test"})
+ results.WaitWithDefaultTimeout()
+ Expect(results).To(Exit(0))
+ outlines := results.OutputToStringArray()
+ Expect(len(outlines)).To(Equal(2))
+ Expect(outlines[0]).To(Equal("1\r"))
+ Expect(outlines[1]).To(Equal("2\r"))
+ })
})
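
The new log-length test runs the container with `-t`, so the allocated tty converts line feeds to CR/LF and the expected lines end in `\r`. A rough standalone sketch of the same check using Python's subprocess module, assuming a local `podman` binary and an available alpine image (the container name is illustrative):

```python
import subprocess

# Run with -t so the container gets a tty; echo output then arrives CR/LF terminated.
subprocess.run(
    ["podman", "run", "-t", "--name", "logtest", "alpine", "sh", "-c", "echo 1; echo 2"],
    check=True,
)
subprocess.run(["podman", "wait", "logtest"], check=True)

# Capture raw bytes so the "\r" characters are not translated away.
logs = subprocess.run(["podman", "logs", "logtest"], check=True, capture_output=True)
lines = [line for line in logs.stdout.decode().split("\n") if line]
assert lines == ["1\r", "2\r"], lines

subprocess.run(["podman", "rm", "logtest"], check=True)
```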
diff --git a/test/e2e/network_create_test.go b/test/e2e/network_create_test.go
index 21f03901b..cb997d10a 100644
--- a/test/e2e/network_create_test.go
+++ b/test/e2e/network_create_test.go
@@ -177,8 +177,7 @@ var _ = Describe("Podman network create", func() {
})
It("podman network create with name and IPv6 subnet", func() {
- SkipIfRootless("FIXME I believe this should work in rootlessmode")
-
+ SkipIfRootless("FIXME It needs the ip6tables modules loaded")
var (
results []network.NcList
)
@@ -218,12 +217,72 @@ var _ = Describe("Podman network create", func() {
Expect(subnet.Contains(containerIP)).To(BeTrue())
})
+ It("podman network create with name and IPv6 flag (dual-stack)", func() {
+ SkipIfRootless("FIXME It needs the ip6tables modules loaded")
+ var (
+ results []network.NcList
+ )
+ nc := podmanTest.Podman([]string{"network", "create", "--subnet", "fd00:4:3:2:1::/64", "--ipv6", "newDualStacknetwork"})
+ nc.WaitWithDefaultTimeout()
+ Expect(nc.ExitCode()).To(BeZero())
+
+ defer podmanTest.removeCNINetwork("newDualStacknetwork")
+
+ // Inspect the network configuration
+ inspect := podmanTest.Podman([]string{"network", "inspect", "newDualStacknetwork"})
+ inspect.WaitWithDefaultTimeout()
+
+ // JSON the network configuration into something usable
+ err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
+ Expect(err).To(BeNil())
+ result := results[0]
+ Expect(result["name"]).To(Equal("newDualStacknetwork"))
+
+ // JSON the bridge info
+ bridgePlugin, err := genericPluginsToBridge(result["plugins"], "bridge")
+ Expect(err).To(BeNil())
+ Expect(bridgePlugin.IPAM.Routes[0].Dest).To(Equal("::/0"))
+ Expect(bridgePlugin.IPAM.Routes[1].Dest).To(Equal("0.0.0.0/0"))
+
+ // Once a container runs on a new network, the NIC will be created. We should
+ // clean those up as best we can
+ defer removeNetworkDevice(bridgePlugin.BrName)
+
+ try := podmanTest.Podman([]string{"run", "-it", "--rm", "--network", "newDualStacknetwork", ALPINE, "sh", "-c", "ip addr show eth0 | grep global | awk ' /inet6 / {print $2}'"})
+ try.WaitWithDefaultTimeout()
+
+ _, subnet, err := net.ParseCIDR("fd00:4:3:2:1::/64")
+ Expect(err).To(BeNil())
+ containerIP, _, err := net.ParseCIDR(try.OutputToString())
+ Expect(err).To(BeNil())
+ // Ensure that the IP the container got is within the subnet the user asked for
+ Expect(subnet.Contains(containerIP)).To(BeTrue())
+ // verify the container has an IPv4 address too (the IPv4 subnet is autogenerated)
+ try = podmanTest.Podman([]string{"run", "-it", "--rm", "--network", "newDualStacknetwork", ALPINE, "sh", "-c", "ip addr show eth0 | awk ' /inet / {print $2}'"})
+ try.WaitWithDefaultTimeout()
+ containerIP, _, err = net.ParseCIDR(try.OutputToString())
+ Expect(err).To(BeNil())
+ Expect(containerIP.To4()).To(Not(BeNil()))
+ })
+
It("podman network create with invalid subnet", func() {
nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/17000", "fail"})
nc.WaitWithDefaultTimeout()
Expect(nc).To(ExitWithError())
})
+ It("podman network create with ipv4 subnet and ipv6 flag", func() {
+ nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.12.0/24", "--ipv6", "fail"})
+ nc.WaitWithDefaultTimeout()
+ Expect(nc).To(ExitWithError())
+ })
+
+ It("podman network create with empty subnet and ipv6 flag", func() {
+ nc := podmanTest.Podman([]string{"network", "create", "--ipv6", "fail"})
+ nc.WaitWithDefaultTimeout()
+ Expect(nc).To(ExitWithError())
+ })
+
It("podman network create with invalid IP", func() {
nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.0/17000", "fail"})
nc.WaitWithDefaultTimeout()
@@ -247,6 +306,29 @@ var _ = Describe("Podman network create", func() {
Expect(ncFail).To(ExitWithError())
})
+ It("podman network create two networks with same subnet should fail", func() {
+ nc := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.13.0/24", "subnet1"})
+ nc.WaitWithDefaultTimeout()
+ Expect(nc.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork("subnet1")
+
+ ncFail := podmanTest.Podman([]string{"network", "create", "--subnet", "10.11.13.0/24", "subnet2"})
+ ncFail.WaitWithDefaultTimeout()
+ Expect(ncFail).To(ExitWithError())
+ })
+
+ It("podman network create two IPv6 networks with same subnet should fail", func() {
+ SkipIfRootless("FIXME It needs the ip6tables modules loaded")
+ nc := podmanTest.Podman([]string{"network", "create", "--subnet", "fd00:4:4:4:4::/64", "--ipv6", "subnet1v6"})
+ nc.WaitWithDefaultTimeout()
+ Expect(nc.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork("subnet1v6")
+
+ ncFail := podmanTest.Podman([]string{"network", "create", "--subnet", "fd00:4:4:4:4::/64", "--ipv6", "subnet2v6"})
+ ncFail.WaitWithDefaultTimeout()
+ Expect(ncFail).To(ExitWithError())
+ })
+
It("podman network create with invalid network name", func() {
nc := podmanTest.Podman([]string{"network", "create", "foo "})
nc.WaitWithDefaultTimeout()
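
The dual-stack test above parses the container's address and asserts it falls inside the requested subnet (the Go code uses `net.ParseCIDR` and `Contains`). The same containment check in Python, using only the standard-library `ipaddress` module:

```python
import ipaddress

def ip_in_subnet(addr_with_prefix: str, subnet: str) -> bool:
    """True if an interface address such as 'fd00:4:3:2:1::5/64'
    lies inside the given subnet such as 'fd00:4:3:2:1::/64'."""
    ip = ipaddress.ip_interface(addr_with_prefix).ip
    return ip in ipaddress.ip_network(subnet)

# Mirrors the assertions in the test: the v6 address is inside the v6 subnet,
# while a v4 address is not (mixed address versions simply compare as False).
assert ip_in_subnet("fd00:4:3:2:1::5/64", "fd00:4:3:2:1::/64")
assert not ip_in_subnet("10.88.0.4/16", "fd00:4:3:2:1::/64")
```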
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index 9bd16c008..7933580a5 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/containers/podman/v2/pkg/rootless"
. "github.com/containers/podman/v2/test/utils"
@@ -351,4 +352,42 @@ var _ = Describe("Podman network", func() {
Expect(lines[0]).To(Equal(netName1))
Expect(lines[1]).To(Equal(netName2))
})
+ It("podman network with multiple aliases", func() {
+ Skip("Until DNSName is updated on our CI images")
+ var worked bool
+ netName := "aliasTest" + stringid.GenerateNonCryptoID()
+ session := podmanTest.Podman([]string{"network", "create", netName})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork(netName)
+
+ top := podmanTest.Podman([]string{"run", "-dt", "--name=web", "--network=" + netName, "--network-alias=web1", "--network-alias=web2", nginx})
+ top.WaitWithDefaultTimeout()
+ Expect(top.ExitCode()).To(BeZero())
+ interval := time.Duration(250 * time.Millisecond)
+ // Wait for the nginx service to be running
+ for i := 0; i < 6; i++ {
+ // Test curl against the container's name
+ c1 := podmanTest.Podman([]string{"run", "--network=" + netName, nginx, "curl", "web"})
+ c1.WaitWithDefaultTimeout()
+ worked = Expect(c1.ExitCode()).To(BeZero())
+ if worked {
+ break
+ }
+ time.Sleep(interval)
+ interval *= 2
+ }
+ Expect(worked).To(BeTrue())
+
+ // Nginx is now running so no need to do a loop
+ // Test against the first alias
+ c2 := podmanTest.Podman([]string{"run", "--network=" + netName, nginx, "curl", "web1"})
+ c2.WaitWithDefaultTimeout()
+ Expect(c2.ExitCode()).To(BeZero())
+
+ // Test against the second alias
+ c3 := podmanTest.Podman([]string{"run", "--network=" + netName, nginx, "curl", "web2"})
+ c3.WaitWithDefaultTimeout()
+ Expect(c3.ExitCode()).To(BeZero())
+ })
})
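
The alias test polls the nginx container with curl, doubling the wait between attempts until one succeeds. That retry-with-exponential-backoff pattern as a small generic Python helper (the `probe` callable is a hypothetical stand-in for the curl-in-a-container step; it is not part of the suite):

```python
import time

def wait_until(probe, attempts: int = 6, interval: float = 0.25) -> bool:
    """Call probe() until it returns truthy, doubling the sleep after each failure."""
    for _ in range(attempts):
        if probe():
            return True
        time.sleep(interval)
        interval *= 2
    return False

# Example with a hypothetical probe: wait_until(lambda: service_is_up("web"))
# sleeps 0.25, 0.5, 1, 2, 4, 8 seconds between/after failed attempts, like the Go loop.
```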
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 1d683e987..7ae474c76 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -959,7 +959,7 @@ var _ = Describe("Podman play kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
- inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .HostConfig.ExtraHosts }}"})
+ inspect := podmanTest.Podman([]string{"inspect", pod.Name, "--format", "{{ .InfraConfig.HostAdd}}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).
@@ -1466,4 +1466,20 @@ MemoryReservation: {{ .HostConfig.MemoryReservation }}`})
Expect(kube.ExitCode()).To(Equal(125))
Expect(kube.ErrorToString()).To(ContainSubstring(invalidImageName))
})
+
+ It("podman play kube applies log driver to containers", func() {
+ Skip("need to verify images have correct packages for journald")
+ pod := getPod()
+ err := generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", "--log-driver", "journald", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .HostConfig.LogConfig.Type }}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring("journald"))
+ })
})
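
The new play-kube test checks the applied log driver by running `podman inspect` with a Go template. A hedged Python sketch of the same verification against an existing container (the container name in the usage comment is a placeholder):

```python
import subprocess

def log_driver(container: str) -> str:
    """Return the log driver podman reports for a container."""
    out = subprocess.run(
        ["podman", "inspect", container,
         "--format", "{{ .HostConfig.LogConfig.Type }}"],
        check=True, capture_output=True, text=True,
    )
    return out.stdout.strip()

# e.g. assert log_driver("mypod-myctr") == "journald"
```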
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index a3cc352b1..e9c1bab21 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -6,6 +6,7 @@ import (
"strings"
. "github.com/containers/podman/v2/test/utils"
+ "github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/uber/jaeger-client-go/utils"
@@ -601,11 +602,11 @@ var _ = Describe("Podman run networking", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(BeZero())
- net := "dnsNetTest"
+ net := "IntTest" + stringid.GenerateNonCryptoID()
session = podmanTest.Podman([]string{"network", "create", net})
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(BeZero())
defer podmanTest.removeCNINetwork(net)
+ Expect(session.ExitCode()).To(BeZero())
pod2 := "testpod2"
session = podmanTest.Podman([]string{"pod", "create", "--network", net, "--name", pod2})
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 424a191c5..edd2fedad 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -116,6 +116,14 @@ registries = ['{{.Host}}:{{.Port}}']`
Expect(search.LineInOutputContains("docker.io/library/alpine")).To(BeTrue())
})
+ It("podman search format json", func() {
+ search := podmanTest.Podman([]string{"search", "--format", "json", "alpine"})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Equal(0))
+ Expect(search.IsJSONOutputValid()).To(BeTrue())
+ Expect(search.OutputToString()).To(ContainSubstring("docker.io/library/alpine"))
+ })
+
It("podman search no-trunc flag", func() {
search := podmanTest.Podman([]string{"search", "--no-trunc", "alpine"})
search.WaitWithDefaultTimeout()
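
The `--format json` test asserts that the search output parses as JSON and mentions the expected image. An approximate Python equivalent, assuming a local `podman` binary and registry access:

```python
import json
import subprocess

out = subprocess.run(
    ["podman", "search", "--format", "json", "alpine"],
    check=True, capture_output=True, text=True,
)

# json.loads raises ValueError if the output is not valid JSON,
# mirroring the IsJSONOutputValid() check in the Go test.
results = json.loads(out.stdout)
assert isinstance(results, list) and results

# Same substring check the Go test performs on the raw output.
assert "docker.io/library/alpine" in out.stdout
```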
diff --git a/test/python/docker/README.md b/test/python/docker/README.md
new file mode 100644
index 000000000..c10fd636d
--- /dev/null
+++ b/test/python/docker/README.md
@@ -0,0 +1,38 @@
+# Docker regression test
+
+Python test suite to validate Podman endpoints using the docker library (aka docker-py).
+See [Docker SDK for Python](https://docker-py.readthedocs.io/en/stable/index.html).
+
+## Running Tests
+
+To run the tests locally in your sandbox (Fedora 32 or 33):
+
+```shell
+# dnf install python3-docker
+```
+
+### Run the entire test suite
+
+```shell
+# python3 -m unittest discover test/python/docker
+```
+
+Passing the -v option instructs unittest to run with a higher level of verbosity and produce detailed output:
+
+```shell
+# python3 -m unittest -v discover test/python/docker
+```
+
+### Run a specific test class
+
+```shell
+# cd test/python/docker
+# python3 -m unittest -v tests.test_images
+```
+
+### Run a specific test within the test class
+
+```shell
+# cd test/python/docker
+# python3 -m unittest tests.test_images.TestImages.test_import_image
+```
diff --git a/test/python/docker/__init__.py b/test/python/docker/__init__.py
new file mode 100644
index 000000000..0e10676b9
--- /dev/null
+++ b/test/python/docker/__init__.py
@@ -0,0 +1,157 @@
+import configparser
+import json
+import os
+import pathlib
+import shutil
+import subprocess
+import tempfile
+
+from test.python.docker import constant
+
+
+class Podman(object):
+ """
+ Instances hold the configuration and setup for running podman commands
+ """
+
+ def __init__(self):
+ """Initialize a Podman instance with global options"""
+ binary = os.getenv("PODMAN", "bin/podman")
+ self.cmd = [binary, "--storage-driver=vfs"]
+
+ cgroupfs = os.getenv("CGROUP_MANAGER", "systemd")
+ self.cmd.append(f"--cgroup-manager={cgroupfs}")
+
+ # No support for tmpfs (/tmp) or extfs (/var/tmp)
+ # self.cmd.append("--storage-driver=overlay")
+
+ if os.getenv("DEBUG"):
+ self.cmd.append("--log-level=debug")
+ self.cmd.append("--syslog=true")
+
+ self.anchor_directory = tempfile.mkdtemp(prefix="podman_docker_")
+
+ self.image_cache = os.path.join(self.anchor_directory, "cache")
+ os.makedirs(self.image_cache, exist_ok=True)
+
+ self.cmd.append("--root=" + os.path.join(self.anchor_directory, "crio"))
+ self.cmd.append("--runroot=" + os.path.join(self.anchor_directory, "crio-run"))
+
+ os.environ["REGISTRIES_CONFIG_PATH"] = os.path.join(
+ self.anchor_directory, "registry.conf"
+ )
+ p = configparser.ConfigParser()
+ p.read_dict(
+ {
+ "registries.search": {"registries": "['quay.io', 'docker.io']"},
+ "registries.insecure": {"registries": "[]"},
+ "registries.block": {"registries": "[]"},
+ }
+ )
+ with open(os.environ["REGISTRIES_CONFIG_PATH"], "w") as w:
+ p.write(w)
+
+ os.environ["CNI_CONFIG_PATH"] = os.path.join(
+ self.anchor_directory, "cni", "net.d"
+ )
+ os.makedirs(os.environ["CNI_CONFIG_PATH"], exist_ok=True)
+ self.cmd.append("--cni-config-dir=" + os.environ["CNI_CONFIG_PATH"])
+ cni_cfg = os.path.join(
+ os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist"
+ )
+ # json decoded and encoded to ensure legal json
+ buf = json.loads(
+ """
+ {
+ "cniVersion": "0.3.0",
+ "name": "podman",
+ "plugins": [{
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.88.0.0/16",
+ "routes": [{
+ "dst": "0.0.0.0/0"
+ }]
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ """
+ )
+ with open(cni_cfg, "w") as w:
+ json.dump(buf, w)
+
+ def open(self, command, *args, **kwargs):
+ """Podman initialized instance to run a given command
+
+ :param self: Podman instance
+ :param command: podman sub-command to run
+ :param args: arguments and options for command
+ :param kwargs: See subprocess.Popen() for shell keyword
+ :return: subprocess.Popen() instance configured to run podman instance
+ """
+ cmd = self.cmd.copy()
+ cmd.append(command)
+ cmd.extend(args)
+
+ shell = kwargs.get("shell", False)
+
+ return subprocess.Popen(
+ cmd,
+ shell=shell,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+
+ def run(self, command, *args, **kwargs):
+ """Podman initialized instance to run a given command
+
+ :param self: Podman instance
+ :param command: podman sub-command to run
+ :param args: arguments and options for command
+ :param kwargs: See subprocess.Popen() for shell and check keywords
+ :return: subprocess.Popen() instance configured to run podman instance
+ """
+ cmd = self.cmd.copy()
+ cmd.append(command)
+ cmd.extend(args)
+
+ check = kwargs.get("check", False)
+ shell = kwargs.get("shell", False)
+
+ return subprocess.run(
+ cmd,
+ shell=shell,
+ check=check,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+ def tear_down(self):
+ shutil.rmtree(self.anchor_directory, ignore_errors=True)
+
+ def restore_image_from_cache(self, client):
+ img = os.path.join(self.image_cache, constant.ALPINE_TARBALL)
+ if not os.path.exists(img):
+ client.pull(constant.ALPINE)
+ image = client.get_image(constant.ALPINE)
+ with open(img, mode="wb") as tarball:
+ for frame in image:
+ tarball.write(frame)
+ else:
+ self.run("load", "-i", img, check=True)
+
+ def flush_image_cache(self):
+ for f in pathlib.Path(self.image_cache).glob("*.tar"):
+ f.unlink()
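
The helper above is consumed by the unittest classes later in this diff: each suite builds a `Podman()` instance, launches `podman system service` through `open()`, then points a docker-py `APIClient` at it and tears everything down afterwards. A condensed sketch of that flow (mirroring the suites' `setUpClass`/`tearDownClass`; assumes it is run from the repository root so the `test.python.docker` package imports):

```python
import time

from docker import APIClient

from test.python.docker import Podman

podman = Podman()
service = podman.open("system", "service", "tcp:127.0.0.1:8080", "--time=0")
time.sleep(2)  # give the service a moment to start listening, as the tests do

try:
    client = APIClient(base_url="tcp://127.0.0.1:8080", timeout=15)
    podman.restore_image_from_cache(client)  # seed the alpine image
    print(client.version()["Version"])
    client.close()
finally:
    service.terminate()
    podman.tear_down()  # removes the temporary storage/CNI sandbox
```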
diff --git a/test/python/docker/common.py b/test/python/docker/common.py
new file mode 100644
index 000000000..2828d2d20
--- /dev/null
+++ b/test/python/docker/common.py
@@ -0,0 +1,21 @@
+from docker import APIClient
+
+from test.python.docker import constant
+
+
+def run_top_container(client: APIClient):
+ c = client.create_container(
+ constant.ALPINE, command="top", detach=True, tty=True, name="top"
+ )
+ client.start(c.get("Id"))
+ return c.get("Id")
+
+
+def remove_all_containers(client: APIClient):
+ for ctnr in client.containers(quiet=True):
+ client.remove_container(ctnr, force=True)
+
+
+def remove_all_images(client: APIClient):
+ for image in client.images(quiet=True):
+ client.remove_image(image, force=True)
diff --git a/test/python/docker/constant.py b/test/python/docker/constant.py
new file mode 100644
index 000000000..892293c97
--- /dev/null
+++ b/test/python/docker/constant.py
@@ -0,0 +1,6 @@
+ALPINE = "quay.io/libpod/alpine:latest"
+ALPINE_SHORTNAME = "alpine"
+ALPINE_TARBALL = "alpine.tar"
+BB = "quay.io/libpod/busybox:latest"
+NGINX = "quay.io/libpod/alpine_nginx:latest"
+infra = "k8s.gcr.io/pause:3.2"
diff --git a/test/python/docker/test_containers.py b/test/python/docker/test_containers.py
new file mode 100644
index 000000000..1c4c9ab53
--- /dev/null
+++ b/test/python/docker/test_containers.py
@@ -0,0 +1,214 @@
+import subprocess
+import sys
+import time
+import unittest
+
+from docker import APIClient, errors
+
+from test.python.docker import Podman, common, constant
+
+
+class TestContainers(unittest.TestCase):
+ podman = None # initialized podman configuration for tests
+ service = None # podman service instance
+ topContainerId = ""
+
+ def setUp(self):
+ super().setUp()
+ self.client = APIClient(base_url="tcp://127.0.0.1:8080", timeout=15)
+ TestContainers.podman.restore_image_from_cache(self.client)
+ TestContainers.topContainerId = common.run_top_container(self.client)
+ self.assertIsNotNone(TestContainers.topContainerId)
+
+ def tearDown(self):
+ common.remove_all_containers(self.client)
+ common.remove_all_images(self.client)
+ self.client.close()
+ return super().tearDown()
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ TestContainers.podman = Podman()
+ TestContainers.service = TestContainers.podman.open(
+ "system", "service", "tcp:127.0.0.1:8080", "--time=0"
+ )
+ # give the service some time to be ready...
+ time.sleep(2)
+
+ rc = TestContainers.service.poll()
+ if rc is not None:
+ raise subprocess.CalledProcessError(rc, "podman system service")
+
+ @classmethod
+ def tearDownClass(cls):
+ TestContainers.service.terminate()
+ stdout, stderr = TestContainers.service.communicate(timeout=0.5)
+ if stdout:
+ sys.stdout.write("\nContainers Service Stdout:\n" + stdout.decode("utf-8"))
+ if stderr:
+ sys.stderr.write("\nContainers Service Stderr:\n" + stderr.decode("utf-8"))
+
+ TestContainers.podman.tear_down()
+ return super().tearDownClass()
+
+ def test_inspect_container(self):
+ # Inspect bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.inspect_container("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Inspect valid container by Id
+ container = self.client.inspect_container(TestContainers.topContainerId)
+ self.assertIn("top", container["Name"])
+
+ # Inspect valid container by name
+ container = self.client.inspect_container("top")
+ self.assertIn(TestContainers.topContainerId, container["Id"])
+
+ def test_create_container(self):
+ # Run a container with detach mode
+ container = self.client.create_container(image="alpine", detach=True)
+ self.assertEqual(len(container), 2)
+
+ def test_start_container(self):
+ # Start bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.start("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Podman docs say it should give a 304, but it returns with no response
+ # # Starting an already started container should return 304
+ # response = self.client.start(container=TestContainers.topContainerId)
+ # self.assertEqual(error.exception.response.status_code, 304)
+
+ # Create a new container and validate the count
+ self.client.create_container(image=constant.ALPINE, name="container2")
+ containers = self.client.containers(quiet=True, all=True)
+ self.assertEqual(len(containers), 2)
+
+ def test_stop_container(self):
+ # Stop bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.stop("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Validate the container state
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "running")
+
+ # Stop a running container and validate the state
+ self.client.stop(TestContainers.topContainerId)
+ container = self.client.inspect_container("top")
+ self.assertIn(
+ container["State"]["Status"],
+ "stopped exited",
+ )
+
+ def test_restart_container(self):
+ # Restart bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.restart("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Validate the container state
+ self.client.stop(TestContainers.topContainerId)
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "stopped")
+
+ # restart a running container and validate the state
+ self.client.restart(TestContainers.topContainerId)
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "running")
+
+ def test_remove_container(self):
+ # Remove bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.remove_container("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Remove container by ID with force
+ self.client.remove_container(TestContainers.topContainerId, force=True)
+ containers = self.client.containers()
+ self.assertEqual(len(containers), 0)
+
+ def test_remove_container_without_force(self):
+ # Validate current container count
+ containers = self.client.containers()
+ self.assertEqual(len(containers), 1)
+
+ # Remove running container should throw error
+ with self.assertRaises(errors.APIError) as error:
+ self.client.remove_container(TestContainers.topContainerId)
+ self.assertEqual(error.exception.response.status_code, 500)
+
+ # Remove container by ID with force
+ self.client.stop(TestContainers.topContainerId)
+ self.client.remove_container(TestContainers.topContainerId)
+ containers = self.client.containers()
+ self.assertEqual(len(containers), 0)
+
+ def test_pause_container(self):
+ # Pause bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.pause("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Validate the container state
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "running")
+
+ # Pause a running container and validate the state
+ self.client.pause(container["Id"])
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "paused")
+
+ def test_pause_stopped_container(self):
+ # Stop the container
+ self.client.stop(TestContainers.topContainerId)
+
+ # Pausing an exited container should throw an error
+ with self.assertRaises(errors.APIError) as error:
+ self.client.pause(TestContainers.topContainerId)
+ self.assertEqual(error.exception.response.status_code, 500)
+
+ def test_unpause_container(self):
+ # Unpause bogus container
+ with self.assertRaises(errors.NotFound) as error:
+ self.client.unpause("dummy")
+ self.assertEqual(error.exception.response.status_code, 404)
+
+ # Validate the container state
+ self.client.pause(TestContainers.topContainerId)
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "paused")
+
+ # Unpause the paused container and validate the state
+ self.client.unpause(TestContainers.topContainerId)
+ container = self.client.inspect_container("top")
+ self.assertEqual(container["State"]["Status"], "running")
+
+ def test_list_container(self):
+ # Add container and validate the count
+ self.client.create_container(image="alpine", detach=True)
+ containers = self.client.containers(all=True)
+ self.assertEqual(len(containers), 2)
+
+ def test_filters(self):
+ self.skipTest("TODO Endpoint does not yet support filters")
+
+ # List container with filter by id
+ filters = {"id": TestContainers.topContainerId}
+ ctnrs = self.client.containers(all=True, filters=filters)
+ self.assertEqual(len(ctnrs), 1)
+
+ # List container with filter by name
+ filters = {"name": "top"}
+ ctnrs = self.client.containers(all=True, filters=filters)
+ self.assertEqual(len(ctnrs), 1)
+
+ def test_rename_container(self):
+ # rename bogus container
+ with self.assertRaises(errors.APIError) as error:
+ self.client.rename(container="dummy", name="newname")
+ self.assertEqual(error.exception.response.status_code, 404)
diff --git a/test/python/docker/test_images.py b/test/python/docker/test_images.py
new file mode 100644
index 000000000..f049da96f
--- /dev/null
+++ b/test/python/docker/test_images.py
@@ -0,0 +1,169 @@
+import collections
+import os
+import subprocess
+import sys
+import time
+import unittest
+
+from docker import APIClient, errors
+
+from test.python.docker import Podman, common, constant
+
+
+class TestImages(unittest.TestCase):
+ podman = None # initialized podman configuration for tests
+ service = None # podman service instance
+
+ def setUp(self):
+ super().setUp()
+ self.client = APIClient(base_url="tcp://127.0.0.1:8080", timeout=15)
+
+ TestImages.podman.restore_image_from_cache(self.client)
+
+ def tearDown(self):
+ common.remove_all_images(self.client)
+ self.client.close()
+ return super().tearDown()
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ TestImages.podman = Podman()
+ TestImages.service = TestImages.podman.open(
+ "system", "service", "tcp:127.0.0.1:8080", "--time=0"
+ )
+ # give the service some time to be ready...
+ time.sleep(2)
+
+ returncode = TestImages.service.poll()
+ if returncode is not None:
+ raise subprocess.CalledProcessError(returncode, "podman system service")
+
+ @classmethod
+ def tearDownClass(cls):
+ TestImages.service.terminate()
+ stdout, stderr = TestImages.service.communicate(timeout=0.5)
+ if stdout:
+ sys.stdout.write("\nImages Service Stdout:\n" + stdout.decode("utf-8"))
+ if stderr:
+ sys.stderr.write("\nImAges Service Stderr:\n" + stderr.decode("utf-8"))
+
+ TestImages.podman.tear_down()
+ return super().tearDownClass()
+
+ def test_inspect_image(self):
+ """Inspect Image"""
+ # Check for error with wrong image name
+ with self.assertRaises(errors.NotFound):
+ self.client.inspect_image("dummy")
+ alpine_image = self.client.inspect_image(constant.ALPINE)
+ self.assertIn(constant.ALPINE, alpine_image["RepoTags"])
+
+ def test_tag_invalid_image(self):
+ """Tag Image
+
+ Validates that a bad response is returned when an invalid image name is given
+ """
+ with self.assertRaises(errors.NotFound):
+ self.client.tag("dummy", "demo")
+
+ def test_tag_valid_image(self):
+ """Validates if the image is tagged successfully"""
+ self.client.tag(constant.ALPINE, "demo", constant.ALPINE_SHORTNAME)
+ alpine_image = self.client.inspect_image(constant.ALPINE)
+ for x in alpine_image["RepoTags"]:
+ self.assertIn("alpine", x)
+
+ # @unittest.skip("doesn't work now")
+ def test_retag_valid_image(self):
+ """Validates if name updates when the image is retagged"""
+ self.client.tag(constant.ALPINE_SHORTNAME, "demo", "rename")
+ alpine_image = self.client.inspect_image(constant.ALPINE)
+ self.assertNotIn("demo:test", alpine_image["RepoTags"])
+
+ def test_list_images(self):
+ """List images"""
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 1)
+ # Add more images
+ self.client.pull(constant.BB)
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 2)
+
+ # List images with filter
+ filters = {"reference": "alpine"}
+ all_images = self.client.images(filters=filters)
+ self.assertEqual(len(all_images), 1)
+
+ def test_search_image(self):
+ """Search for image"""
+ response = self.client.search("libpod/alpine")
+ for i in response:
+ self.assertIn("quay.io/libpod/alpine", i["Name"])
+
+ def test_remove_image(self):
+ """Remove image"""
+ # Check for error with wrong image name
+ with self.assertRaises(errors.NotFound):
+ self.client.remove_image("dummy")
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 1)
+
+ alpine_image = self.client.inspect_image(constant.ALPINE)
+ self.client.remove_image(alpine_image["Id"])
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 0)
+
+ def test_image_history(self):
+ """Image history"""
+ # Check for error with wrong image name
+ with self.assertRaises(errors.NotFound):
+ self.client.history("dummy")
+
+ # NOTE: history() has incorrect return type hint
+ history = self.client.history(constant.ALPINE)
+ alpine_image = self.client.inspect_image(constant.ALPINE)
+ image_id = (
+ alpine_image["Id"][7:]
+ if alpine_image["Id"].startswith("sha256:")
+ else alpine_image["Id"]
+ )
+
+ found = False
+ for change in history:
+ found |= image_id in change.values()
+ self.assertTrue(found, f"image id {image_id} not found in history")
+
+ def test_get_image_exists_not(self):
+ """Negative test for get image"""
+ with self.assertRaises(errors.NotFound):
+ response = self.client.get_image("image_does_not_exists")
+ collections.deque(response)
+
+ def test_export_image(self):
+ """Export Image"""
+ self.client.pull(constant.BB)
+ image = self.client.get_image(constant.BB)
+
+ file = os.path.join(TestImages.podman.image_cache, "busybox.tar")
+ with open(file, mode="wb") as tarball:
+ for frame in image:
+ tarball.write(frame)
+ sz = os.path.getsize(file)
+ self.assertGreater(sz, 0)
+
+ def test_import_image(self):
+ """Import|Load Image"""
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 1)
+
+ file = os.path.join(TestImages.podman.image_cache, constant.ALPINE_TARBALL)
+ self.client.import_image_from_file(filename=file)
+
+ all_images = self.client.images()
+ self.assertEqual(len(all_images), 2)
+
+
+if __name__ == "__main__":
+ # Setup temporary space
+ unittest.main()
diff --git a/test/python/docker/test_system.py b/test/python/docker/test_system.py
new file mode 100644
index 000000000..f911baee4
--- /dev/null
+++ b/test/python/docker/test_system.py
@@ -0,0 +1,66 @@
+import subprocess
+import sys
+import time
+import unittest
+
+from docker import APIClient
+
+from test.python.docker import Podman, common, constant
+
+
+class TestSystem(unittest.TestCase):
+ podman = None # initialized podman configuration for tests
+ service = None # podman service instance
+ topContainerId = ""
+
+ def setUp(self):
+ super().setUp()
+ self.client = APIClient(base_url="tcp://127.0.0.1:8080", timeout=15)
+
+ TestSystem.podman.restore_image_from_cache(self.client)
+ TestSystem.topContainerId = common.run_top_container(self.client)
+
+ def tearDown(self):
+ common.remove_all_containers(self.client)
+ common.remove_all_images(self.client)
+ self.client.close()
+ return super().tearDown()
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ TestSystem.podman = Podman()
+ TestSystem.service = TestSystem.podman.open(
+ "system", "service", "tcp:127.0.0.1:8080", "--time=0"
+ )
+ # give the service some time to be ready...
+ time.sleep(2)
+
+ returncode = TestSystem.service.poll()
+ if returncode is not None:
+ raise subprocess.CalledProcessError(returncode, "podman system service")
+
+ @classmethod
+ def tearDownClass(cls):
+ TestSystem.service.terminate()
+ stdout, stderr = TestSystem.service.communicate(timeout=0.5)
+ if stdout:
+ sys.stdout.write("\nImages Service Stdout:\n" + stdout.decode("utf-8"))
+ if stderr:
+ sys.stderr.write("\nImAges Service Stderr:\n" + stderr.decode("utf-8"))
+
+ TestSystem.podman.tear_down()
+ return super().tearDownClass()
+
+ def test_Info(self):
+ self.assertIsNotNone(self.client.info())
+
+ def test_info_container_details(self):
+ info = self.client.info()
+ self.assertEqual(info["Containers"], 1)
+ self.client.create_container(image=constant.ALPINE)
+ info = self.client.info()
+ self.assertEqual(info["Containers"], 2)
+
+ def test_version(self):
+ self.assertIsNotNone(self.client.version())
diff --git a/test/python/dockerpy/README.md b/test/python/dockerpy/README.md
deleted file mode 100644
index 22908afc6..000000000
--- a/test/python/dockerpy/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Dockerpy regression test
-
-Python test suite to validate Podman endpoints using dockerpy library
-
-## Running Tests
-
-To run the tests locally in your sandbox (Fedora 32):
-
-```shell script
-# dnf install python3-docker
-```
-
-### Run the entire test suite
-
-```shell
-# cd test/python/dockerpy
-# PYTHONPATH=/usr/bin/python python -m unittest discover .
-```
-
-Passing the -v option to your test script will instruct unittest.main() to enable a higher level of verbosity, and produce detailed output:
-
-```shell
-# cd test/python/dockerpy
-# PYTHONPATH=/usr/bin/python python -m unittest -v discover .
-```
-
-### Run a specific test class
-
-```shell
-# cd test/python/dockerpy
-# PYTHONPATH=/usr/bin/python python -m unittest -v tests.test_images
-```
-
-### Run a specific test within the test class
-
-```shell
-# cd test/python/dockerpy
-# PYTHONPATH=/usr/bin/python python -m unittest tests.test_images.TestImages.test_import_image
-
-```
diff --git a/test/python/dockerpy/__init__.py b/test/python/dockerpy/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/test/python/dockerpy/__init__.py
+++ /dev/null
diff --git a/test/python/dockerpy/tests/__init__.py b/test/python/dockerpy/tests/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/test/python/dockerpy/tests/__init__.py
+++ /dev/null
diff --git a/test/python/dockerpy/tests/common.py b/test/python/dockerpy/tests/common.py
deleted file mode 100644
index f83f4076f..000000000
--- a/test/python/dockerpy/tests/common.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import os
-import pathlib
-import subprocess
-import sys
-import time
-
-from docker import APIClient
-
-from . import constant
-
-alpineDict = {
- "name": "docker.io/library/alpine:latest",
- "shortName": "alpine",
- "tarballName": "alpine.tar"
-}
-
-
-def get_client():
- client = APIClient(base_url="http://localhost:8080", timeout=15)
- return client
-
-
-client = get_client()
-
-
-def podman():
- binary = os.getenv("PODMAN_BINARY")
- if binary is None:
- binary = "../../../bin/podman"
- return binary
-
-
-def restore_image_from_cache(TestClass):
- alpineImage = os.path.join(constant.ImageCacheDir,
- alpineDict["tarballName"])
- if not os.path.exists(alpineImage):
- os.makedirs(constant.ImageCacheDir, exist_ok=True)
- client.pull(constant.ALPINE)
- image = client.get_image(constant.ALPINE)
- tarball = open(alpineImage, mode="wb")
- for frame in image:
- tarball.write(frame)
- tarball.close()
- else:
- subprocess.run(
- [podman(), "load", "-i", alpineImage],
- shell=False,
- stdin=subprocess.DEVNULL,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL,
- check=True,
- )
-
-
-def flush_image_cache(TestCase):
- for f in pathlib.Path(constant.ImageCacheDir).glob("*"):
- f.unlink(f)
-
-
-def run_top_container():
- c = client.create_container(image=constant.ALPINE,
- command='/bin/sleep 5',
- name=constant.TOP)
- client.start(container=c.get("Id"))
- return c.get("Id")
-
-
-def enable_sock(TestClass):
- TestClass.podman = subprocess.Popen(
- [
- podman(), "system", "service", "tcp:localhost:8080",
- "--log-level=debug", "--time=0"
- ],
- shell=False,
- stdin=subprocess.DEVNULL,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL,
- )
- time.sleep(2)
-
-
-def terminate_connection(TestClass):
- TestClass.podman.terminate()
- stdout, stderr = TestClass.podman.communicate(timeout=0.5)
- if stdout:
- print("\nService Stdout:\n" + stdout.decode('utf-8'))
- if stderr:
- print("\nService Stderr:\n" + stderr.decode('utf-8'))
-
- if TestClass.podman.returncode > 0:
- sys.stderr.write("podman exited with error code {}\n".format(
- TestClass.podman.returncode))
- sys.exit(2)
-
-
-def remove_all_containers():
- containers = client.containers(quiet=True)
- for c in containers:
- client.remove_container(container=c.get("Id"), force=True)
-
-
-def remove_all_images():
- allImages = client.images()
- for image in allImages:
- client.remove_image(image, force=True)
diff --git a/test/python/dockerpy/tests/constant.py b/test/python/dockerpy/tests/constant.py
deleted file mode 100644
index b44442d02..000000000
--- a/test/python/dockerpy/tests/constant.py
+++ /dev/null
@@ -1,13 +0,0 @@
-BB = "docker.io/library/busybox:latest"
-NGINX = "docker.io/library/nginx:latest"
-ALPINE = "docker.io/library/alpine:latest"
-ALPINE_SHORTNAME = "alpine"
-ALPINELISTTAG = "docker.io/library/alpine:3.10.2"
-ALPINELISTDIGEST = "docker.io/library/alpine@sha256:72c42ed48c3a2db31b7dafe17d275b634664a708d901ec9fd57b1529280f01fb"
-ALPINEAMD64DIGEST = "docker.io/library/alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f"
-ALPINEAMD64ID = "961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e66dd4"
-ALPINEARM64DIGEST = "docker.io/library/alpine@sha256:db7f3dcef3d586f7dd123f107c93d7911515a5991c4b9e51fa2a43e46335a43e"
-ALPINEARM64ID = "915beeae46751fc564998c79e73a1026542e945ca4f73dc841d09ccc6c2c0672"
-infra = "k8s.gcr.io/pause:3.2"
-TOP = "top"
-ImageCacheDir = "/tmp/podman/imagecachedir"
diff --git a/test/python/dockerpy/tests/test_containers.py b/test/python/dockerpy/tests/test_containers.py
deleted file mode 100644
index 6b89688d4..000000000
--- a/test/python/dockerpy/tests/test_containers.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import time
-import unittest
-
-import requests
-
-from . import common, constant
-
-client = common.get_client()
-
-
-class TestContainers(unittest.TestCase):
- topContainerId = ""
-
- def setUp(self):
- super().setUp()
- common.restore_image_from_cache(self)
- TestContainers.topContainerId = common.run_top_container()
-
- def tearDown(self):
- common.remove_all_containers()
- common.remove_all_images()
- return super().tearDown()
-
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
- common.enable_sock(cls)
-
- @classmethod
- def tearDownClass(cls):
- common.terminate_connection(cls)
- common.flush_image_cache(cls)
- return super().tearDownClass()
-
- def test_inspect_container(self):
- # Inspect bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.inspect_container("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
- # Inspect valid container by name
- container = client.inspect_container(constant.TOP)
- self.assertIn(TestContainers.topContainerId, container["Id"])
- # Inspect valid container by Id
- container = client.inspect_container(TestContainers.topContainerId)
- self.assertIn(constant.TOP, container["Name"])
-
- def test_create_container(self):
- # Run a container with detach mode
- container = client.create_container(image="alpine", detach=True)
- self.assertEqual(len(container), 2)
-
- def test_start_container(self):
- # Start bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.start("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Podman docs says it should give a 304 but returns with no response
- # # Start a already started container should return 304
- # response = client.start(container=TestContainers.topContainerId)
- # self.assertEqual(error.exception.response.status_code, 304)
-
- # Create a new container and validate the count
- client.create_container(image=constant.ALPINE, name="container2")
- containers = client.containers(quiet=True, all=True)
- self.assertEqual(len(containers), 2)
-
- def test_stop_container(self):
- # Stop bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.stop("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Validate the container state
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "running")
-
- # Stop a running container and validate the state
- client.stop(TestContainers.topContainerId)
- container = client.inspect_container(constant.TOP)
- self.assertIn(
- container["State"]["Status"],
- "stopped exited",
- )
-
- def test_restart_container(self):
- # Restart bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.restart("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Validate the container state
- client.stop(TestContainers.topContainerId)
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "stopped")
-
- # restart a running container and validate the state
- client.restart(TestContainers.topContainerId)
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "running")
-
- def test_remove_container(self):
- # Remove bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.remove_container("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Remove container by ID with force
- client.remove_container(TestContainers.topContainerId, force=True)
- containers = client.containers()
- self.assertEqual(len(containers), 0)
-
- def test_remove_container_without_force(self):
- # Validate current container count
- containers = client.containers()
- self.assertTrue(len(containers), 1)
-
- # Remove running container should throw error
- with self.assertRaises(requests.HTTPError) as error:
- client.remove_container(TestContainers.topContainerId)
- self.assertEqual(error.exception.response.status_code, 500)
-
- # Remove container by ID with force
- client.stop(TestContainers.topContainerId)
- client.remove_container(TestContainers.topContainerId)
- containers = client.containers()
- self.assertEqual(len(containers), 0)
-
- def test_pause_container(self):
- # Pause bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.pause("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Validate the container state
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "running")
-
- # Pause a running container and validate the state
- client.pause(container)
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "paused")
-
- def test_pause_stoped_container(self):
- # Stop the container
- client.stop(TestContainers.topContainerId)
-
- # Pause exited container should trow error
- with self.assertRaises(requests.HTTPError) as error:
- client.pause(TestContainers.topContainerId)
- self.assertEqual(error.exception.response.status_code, 500)
-
- def test_unpause_container(self):
- # Unpause bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.unpause("dummy")
- self.assertEqual(error.exception.response.status_code, 404)
-
- # Validate the container state
- client.pause(TestContainers.topContainerId)
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "paused")
-
- # Pause a running container and validate the state
- client.unpause(TestContainers.topContainerId)
- container = client.inspect_container(constant.TOP)
- self.assertEqual(container["State"]["Status"], "running")
-
- def test_list_container(self):
-
- # Add container and validate the count
- client.create_container(image="alpine", detach=True)
- containers = client.containers(all=True)
- self.assertEqual(len(containers), 2)
-
- # Not working for now......checking
- # # List container with filter by id
- # filters = {'id':TestContainers.topContainerId}
- # filteredContainers = client.containers(all=True,filters = filters)
- # self.assertEqual(len(filteredContainers) , 1)
-
- # # List container with filter by name
- # filters = {'name':constant.TOP}
- # filteredContainers = client.containers(all=True,filters = filters)
- # self.assertEqual(len(filteredContainers) , 1)
-
- @unittest.skip("Not Supported yet")
- def test_rename_container(self):
- # rename bogus container
- with self.assertRaises(requests.HTTPError) as error:
- client.rename(container="dummy", name="newname")
- self.assertEqual(error.exception.response.status_code, 404)
diff --git a/test/python/dockerpy/tests/test_images.py b/test/python/dockerpy/tests/test_images.py
deleted file mode 100644
index 602a86de2..000000000
--- a/test/python/dockerpy/tests/test_images.py
+++ /dev/null
@@ -1,162 +0,0 @@
-import os
-import stat
-import unittest
-from os import remove
-from stat import ST_SIZE
-
-import docker
-import requests
-
-from . import common, constant
-
-client = common.get_client()
-
-
-class TestImages(unittest.TestCase):
- def setUp(self):
- super().setUp()
- common.restore_image_from_cache(self)
-
- def tearDown(self):
- common.remove_all_images()
- return super().tearDown()
-
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
- common.enable_sock(cls)
-
- @classmethod
- def tearDownClass(cls):
- common.terminate_connection(cls)
- common.flush_image_cache(cls)
- return super().tearDownClass()
-
-# Inspect Image
-
- def test_inspect_image(self):
- # Check for error with wrong image name
- with self.assertRaises(requests.HTTPError):
- client.inspect_image("dummy")
- alpine_image = client.inspect_image(constant.ALPINE)
- self.assertIn(constant.ALPINE, alpine_image["RepoTags"])
-
-# Tag Image
-
-# Validates if invalid image name is given a bad response is encountered.
-
- def test_tag_invalid_image(self):
- with self.assertRaises(requests.HTTPError):
- client.tag("dummy", "demo")
-
- # Validates if the image is tagged successfully.
- def test_tag_valid_image(self):
- client.tag(constant.ALPINE, "demo", constant.ALPINE_SHORTNAME)
- alpine_image = client.inspect_image(constant.ALPINE)
- for x in alpine_image["RepoTags"]:
- if ("demo:alpine" in x):
- self.assertTrue
- self.assertFalse
-
- # Validates if name updates when the image is retagged.
- @unittest.skip("doesn't work now")
- def test_retag_valid_image(self):
- client.tag(constant.ALPINE_SHORTNAME, "demo", "rename")
- alpine_image = client.inspect_image(constant.ALPINE)
- self.assertNotIn("demo:test", alpine_image["RepoTags"])
-
-# List Image
-# List All Images
-
- def test_list_images(self):
- allImages = client.images()
- self.assertEqual(len(allImages), 1)
- # Add more images
- client.pull(constant.BB)
- allImages = client.images()
- self.assertEqual(len(allImages), 2)
-
- # List images with filter
- filters = {'reference': 'alpine'}
- allImages = client.images(filters=filters)
- self.assertEqual(len(allImages), 1)
-
-# Search Image
-
- def test_search_image(self):
- response = client.search("alpine")
- for i in response:
- # Alpine found
- if "docker.io/library/alpine" in i["Name"]:
- self.assertTrue
- self.assertFalse
-
-# Image Exist (No docker-py support yet)
-
-# Remove Image
-
- def test_remove_image(self):
- # Check for error with wrong image name
- with self.assertRaises(requests.HTTPError):
- client.remove_image("dummy")
- allImages = client.images()
- self.assertEqual(len(allImages), 1)
- alpine_image = client.inspect_image(constant.ALPINE)
- client.remove_image(alpine_image)
- allImages = client.images()
- self.assertEqual(len(allImages), 0)
-
-# Image History
-
- def test_image_history(self):
- # Check for error with wrong image name
- with self.assertRaises(requests.HTTPError):
- client.history("dummy")
-
- imageHistory = client.history(constant.ALPINE)
- alpine_image = client.inspect_image(constant.ALPINE)
- for h in imageHistory:
- if h["Id"] in alpine_image["Id"]:
- self.assertTrue
- self.assertFalse
-
-# Prune Image (No docker-py support yet)
-
- def test_get_image_dummy(self):
- # FIXME: seems to be an error in the library
- self.skipTest("Documentation and library do not match")
- # Check for error with wrong image name
- with self.assertRaises(docker.errors.ImageNotFound):
- client.get_image("dummy")
-
-# Export Image
-
- def test_export_image(self):
- client.pull(constant.BB)
- if not os.path.exists(constant.ImageCacheDir):
- os.makedirs(constant.ImageCacheDir)
-
- image = client.get_image(constant.BB)
-
- file = os.path.join(constant.ImageCacheDir, "busybox.tar")
- tarball = open(file, mode="wb")
- for frame in image:
- tarball.write(frame)
- tarball.close()
- sz = os.path.getsize(file)
- self.assertGreater(sz, 0)
-
-
-# Import|Load Image
-
- def test_import_image(self):
- allImages = client.images()
- self.assertEqual(len(allImages), 1)
- file = os.path.join(constant.ImageCacheDir, "alpine.tar")
- client.import_image_from_file(filename=file)
- allImages = client.images()
- self.assertEqual(len(allImages), 2)
-
-if __name__ == '__main__':
- # Setup temporary space
- unittest.main()
diff --git a/test/python/dockerpy/tests/test_info_version.py b/test/python/dockerpy/tests/test_info_version.py
deleted file mode 100644
index e3ee18ec7..000000000
--- a/test/python/dockerpy/tests/test_info_version.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import unittest
-
-from . import common, constant
-
-client = common.get_client()
-
-
-class TestInfo_Version(unittest.TestCase):
-
- podman = None
- topContainerId = ""
-
- def setUp(self):
- super().setUp()
- common.restore_image_from_cache(self)
- TestInfo_Version.topContainerId = common.run_top_container()
-
- def tearDown(self):
- common.remove_all_containers()
- common.remove_all_images()
- return super().tearDown()
-
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
- common.enable_sock(cls)
-
- @classmethod
- def tearDownClass(cls):
- common.terminate_connection(cls)
- return super().tearDownClass()
-
- def test_Info(self):
- self.assertIsNotNone(client.info())
-
- def test_info_container_details(self):
- info = client.info()
- self.assertEqual(info["Containers"], 1)
- client.create_container(image=constant.ALPINE)
- info = client.info()
- self.assertEqual(info["Containers"], 2)
-
- def test_version(self):
- self.assertIsNotNone(client.version())
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index b0c855d81..12df966e2 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -436,6 +436,16 @@ json-file | f
@test "podman run --log-driver journald" {
skip_if_remote "We cannot read journalctl over remote."
+ # We can't use journald on RHEL as rootless, either: rhbz#1895105
+ if is_rootless; then
+ run journalctl -n 1
+ if [[ $status -ne 0 ]]; then
+ if [[ $output =~ permission ]]; then
+ skip "Cannot use rootless journald on this system"
+ fi
+ fi
+ fi
+
msg=$(random_string 20)
pidfile="${PODMAN_TMPDIR}/$(random_string 20)"
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index 130bc5243..a3d6a5800 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -51,6 +51,16 @@ ${cid[0]} d" "Sequential output from logs"
}
@test "podman logs over journald" {
+ # We can't use journald on RHEL as rootless: rhbz#1895105
+ if is_rootless; then
+ run journalctl -n 1
+ if [[ $status -ne 0 ]]; then
+ if [[ $output =~ permission ]]; then
+ skip "Cannot use rootless journald on this system"
+ fi
+ fi
+ fi
+
msg=$(random_string 20)
run_podman run --name myctr --log-driver journald $IMAGE echo $msg