Diffstat (limited to 'test')
 test/apiv2/20-containers.at                          |   9
 test/apiv2/python/rest_api/fixtures/api_testcase.py  |   2
 test/apiv2/python/rest_api/test_v2_0_0_container.py  |   4
 test/apiv2/python/rest_api/test_v2_0_0_image.py      |   5
 test/e2e/checkpoint_test.go                          | 154
 test/e2e/events_test.go                              |  13
 test/e2e/generate_systemd_test.go                    |   4
 test/system/090-events.bats                          |   5
 test/system/255-auto-update.bats                     | 274
 test/system/450-interactive.bats                     |   3
 test/system/500-networking.bats                      |  58
 11 files changed, 511 insertions(+), 20 deletions(-)
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at index a81210855..ef51757c9 100644 --- a/test/apiv2/20-containers.at +++ b/test/apiv2/20-containers.at @@ -341,3 +341,12 @@ t GET containers/$cid/json 200 \ .HostConfig.NanoCpus=500000 t DELETE containers/$cid?v=true 204 + +# Test Compat Create with default network mode (#10569) +t POST containers/create Image=$IMAGE HostConfig='{"NetworkMode":"default"}' 201 \ + .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") +t GET containers/$cid/json 200 \ + .HostConfig.NetworkMode="bridge" + +t DELETE containers/$cid?v=true 204 diff --git a/test/apiv2/python/rest_api/fixtures/api_testcase.py b/test/apiv2/python/rest_api/fixtures/api_testcase.py index 8b771774b..155e93928 100644 --- a/test/apiv2/python/rest_api/fixtures/api_testcase.py +++ b/test/apiv2/python/rest_api/fixtures/api_testcase.py @@ -49,7 +49,7 @@ class APITestCase(unittest.TestCase): def setUp(self): super().setUp() - APITestCase.podman.run("run", "alpine", "/bin/ls", check=True) + APITestCase.podman.run("run", "-d", "alpine", "top", check=True) def tearDown(self) -> None: APITestCase.podman.run("pod", "rm", "--all", "--force", check=True) diff --git a/test/apiv2/python/rest_api/test_v2_0_0_container.py b/test/apiv2/python/rest_api/test_v2_0_0_container.py index f67013117..b4b3af2df 100644 --- a/test/apiv2/python/rest_api/test_v2_0_0_container.py +++ b/test/apiv2/python/rest_api/test_v2_0_0_container.py @@ -12,7 +12,7 @@ class ContainerTestCase(APITestCase): r = requests.get(self.uri("/containers/json"), timeout=5) self.assertEqual(r.status_code, 200, r.text) obj = r.json() - self.assertEqual(len(obj), 0) + self.assertEqual(len(obj), 1) def test_list_all(self): r = requests.get(self.uri("/containers/json?all=true")) @@ -36,7 +36,7 @@ class ContainerTestCase(APITestCase): self.assertId(r.content) def test_delete(self): - r = requests.delete(self.uri(self.resolve_container("/containers/{}"))) + r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true"))) self.assertEqual(r.status_code, 204, r.text) def test_stop(self): diff --git a/test/apiv2/python/rest_api/test_v2_0_0_image.py b/test/apiv2/python/rest_api/test_v2_0_0_image.py index 243b1d5f5..2cd7bfa96 100644 --- a/test/apiv2/python/rest_api/test_v2_0_0_image.py +++ b/test/apiv2/python/rest_api/test_v2_0_0_image.py @@ -86,6 +86,11 @@ class ImageTestCase(APITestCase): self.assertTrue(keys["id"], "Expected to find id stanza") self.assertTrue(keys["images"], "Expected to find images stanza") self.assertTrue(keys["stream"], "Expected to find stream progress stanza's") + def test_create(self): + r = requests.post(self.podman_url + "/v1.40/images/create?fromImage=alpine&platform=linux/amd64/v8", timeout=15) + self.assertEqual(r.status_code, 200, r.text) + r = requests.post(self.podman_url + "/v1.40/images/create?fromSrc=-&repo=fedora&message=testing123", timeout=15) + self.assertEqual(r.status_code, 200, r.text) def test_search_compat(self): url = self.podman_url + "/v1.40/images/search" diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index 9d0049910..70a1d09ed 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -425,6 +425,106 @@ var _ = Describe("Podman checkpoint", func() { // Remove exported checkpoint os.Remove(fileName) }) + // This test does the same steps which are necessary for migrating + // a container from one host to another + It("podman checkpoint container with export and different compression algorithms", func() { + localRunString := 
getRunString([]string{"--rm", ALPINE, "top"}) + session := podmanTest.Podman(localRunString) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + cid := session.OutputToString() + fileName := "/tmp/checkpoint-" + cid + ".tar" + + // Checkpoint with the default algorithm + result := podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName}) + result.WaitWithDefaultTimeout() + + // As the container has been started with '--rm' it will be completely + // cleaned up after checkpointing. + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Restore container + result = podmanTest.Podman([]string{"container", "restore", "-i", fileName}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) + + // Checkpoint with the zstd algorithm + result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "--compress", "zstd"}) + result.WaitWithDefaultTimeout() + + // As the container has been started with '--rm' it will be completely + // cleaned up after checkpointing. + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Restore container + result = podmanTest.Podman([]string{"container", "restore", "-i", fileName}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) + + // Checkpoint with the none algorithm + result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "none"}) + result.WaitWithDefaultTimeout() + + // As the container has been started with '--rm' it will be completely + // cleaned up after checkpointing. + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Restore container + result = podmanTest.Podman([]string{"container", "restore", "-i", fileName}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) + + // Checkpoint with the gzip algorithm + result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "gzip"}) + result.WaitWithDefaultTimeout() + + // As the container has been started with '--rm' it will be completely + // cleaned up after checkpointing. 
+ Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Restore container + result = podmanTest.Podman([]string{"container", "restore", "-i", fileName}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) + + // Checkpoint with the non-existing algorithm + result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName, "-c", "non-existing"}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(125)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.NumberOfContainers()).To(Equal(1)) + + result = podmanTest.Podman([]string{"rm", "-fa"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Remove exported checkpoint + os.Remove(fileName) + }) It("podman checkpoint and restore container with root file-system changes", func() { // Start the container @@ -822,4 +922,58 @@ var _ = Describe("Podman checkpoint", func() { os.Remove(checkpointFileName) os.Remove(preCheckpointFileName) }) + + It("podman checkpoint and restore container with different port mappings", func() { + localRunString := getRunString([]string{"-p", "1234:6379", "--rm", redis}) + session := podmanTest.Podman(localRunString) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + cid := session.OutputToString() + fileName := "/tmp/checkpoint-" + cid + ".tar.gz" + + // Open a network connection to the redis server via initial port mapping + conn, err := net.Dial("tcp", "localhost:1234") + if err != nil { + os.Exit(1) + } + conn.Close() + + // Checkpoint the container + result := podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName}) + result.WaitWithDefaultTimeout() + + // As the container has been started with '--rm' it will be completely + // cleaned up after checkpointing. 
+ Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Restore container with different port mapping + result = podmanTest.Podman([]string{"container", "restore", "-p", "1235:6379", "-i", fileName}) + result.WaitWithDefaultTimeout() + + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) + + // Open a network connection to the redis server via initial port mapping + // This should fail + conn, err = net.Dial("tcp", "localhost:1234") + Expect(err.Error()).To(ContainSubstring("connection refused")) + // Open a network connection to the redis server via new port mapping + conn, err = net.Dial("tcp", "localhost:1235") + if err != nil { + os.Exit(1) + } + conn.Close() + + result = podmanTest.Podman([]string{"rm", "-fa"}) + result.WaitWithDefaultTimeout() + Expect(result.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + Expect(podmanTest.NumberOfContainers()).To(Equal(0)) + + // Remove exported checkpoint + os.Remove(fileName) + }) }) diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go index 4dbbe9dd8..cc7c4d996 100644 --- a/test/e2e/events_test.go +++ b/test/e2e/events_test.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/containers/podman/v3/libpod/events" . "github.com/containers/podman/v3/test/utils" "github.com/containers/storage/pkg/stringid" . "github.com/onsi/ginkgo" @@ -134,12 +135,10 @@ var _ = Describe("Podman events", func() { jsonArr := test.OutputToStringArray() Expect(test.OutputToStringArray()).ShouldNot(BeEmpty()) - eventsMap := make(map[string]string) - err := json.Unmarshal([]byte(jsonArr[0]), &eventsMap) + event := events.Event{} + err := json.Unmarshal([]byte(jsonArr[0]), &event) Expect(err).ToNot(HaveOccurred()) - Expect(eventsMap).To(HaveKey("Status")) - test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"}) test.WaitWithDefaultTimeout() Expect(test).To(Exit(0)) @@ -147,11 +146,9 @@ var _ = Describe("Podman events", func() { jsonArr = test.OutputToStringArray() Expect(test.OutputToStringArray()).ShouldNot(BeEmpty()) - eventsMap = make(map[string]string) - err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap) + event = events.Event{} + err = json.Unmarshal([]byte(jsonArr[0]), &event) Expect(err).ToNot(HaveOccurred()) - - Expect(eventsMap).To(HaveKey("Status")) }) It("podman events --until future", func() { diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go index 75d778f10..e03d6899e 100644 --- a/test/e2e/generate_systemd_test.go +++ b/test/e2e/generate_systemd_test.go @@ -215,7 +215,6 @@ var _ = Describe("Podman generate systemd", func() { // Grepping the output (in addition to unit tests) Expect(session.OutputToString()).To(ContainSubstring("# container-foo.service")) Expect(session.OutputToString()).To(ContainSubstring(" --replace ")) - Expect(session.OutputToString()).To(ContainSubstring(" stop --ignore --cidfile %t/container-foo.ctr-id -t 42")) if !IsRemote() { // The podman commands in the unit should contain the root flags if generate systemd --new is used Expect(session.OutputToString()).To(ContainSubstring(" --runroot")) @@ -234,7 +233,6 @@ var _ = Describe("Podman generate systemd", func() { // Grepping the output (in addition to unit tests) Expect(session.OutputToString()).To(ContainSubstring("# container-foo.service")) 
Expect(session.OutputToString()).To(ContainSubstring(" --replace ")) - Expect(session.OutputToString()).To(ContainSubstring(" stop --ignore --cidfile %t/container-foo.ctr-id -t 42")) }) It("podman generate systemd --new without explicit detaching param", func() { @@ -247,7 +245,7 @@ var _ = Describe("Podman generate systemd", func() { Expect(session.ExitCode()).To(Equal(0)) // Grepping the output (in addition to unit tests) - Expect(session.OutputToString()).To(ContainSubstring("--cgroups=no-conmon -d")) + Expect(session.OutputToString()).To(ContainSubstring(" -d ")) }) It("podman generate systemd --new with explicit detaching param in middle", func() { diff --git a/test/system/090-events.bats b/test/system/090-events.bats index 09c2d0c10..d889bd7f9 100644 --- a/test/system/090-events.bats +++ b/test/system/090-events.bats @@ -6,7 +6,6 @@ load helpers @test "events with a filter by label" { - skip_if_remote "FIXME: -remote does not include labels in event output" cname=test-$(random_string 30 | tr A-Z a-z) labelname=$(random_string 10) labelvalue=$(random_string 15) @@ -27,7 +26,7 @@ load helpers } @test "image events" { - skip_if_remote "FIXME: remove events on podman-remote seem to be broken" + skip_if_remote "remote does not support --events-backend" pushedDir=$PODMAN_TMPDIR/dir mkdir -p $pushedDir @@ -86,7 +85,5 @@ function _events_disjunctive_filters() { } @test "events with disjunctive filters - default" { - # NOTE: the last event for bar doesn't show up reliably. - skip_if_remote "FIXME #10529: remote events lose data" _events_disjunctive_filters "" } diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats new file mode 100644 index 000000000..9bfb44791 --- /dev/null +++ b/test/system/255-auto-update.bats @@ -0,0 +1,274 @@ +#!/usr/bin/env bats -*- bats -*- +# +# Tests for automatically update images for containerized services +# + +load helpers + +UNIT_DIR="/usr/lib/systemd/system" +SNAME_FILE=$BATS_TMPDIR/services + +function setup() { + skip_if_remote "systemd tests are meaningless over remote" + skip_if_rootless + + basic_setup +} + +function teardown() { + while read line; do + if [[ "$line" =~ "podman-auto-update" ]]; then + echo "Stop timer: $line.timer" + systemctl stop $line.timer + systemctl disable $line.timer + else + systemctl stop $line + fi + rm -f $UNIT_DIR/$line.{service,timer} + done < $SNAME_FILE + + rm -f $SNAME_FILE + run_podman ? rmi quay.io/libpod/alpine:latest + run_podman ? rmi quay.io/libpod/alpine_nginx:latest + run_podman ? rmi quay.io/libpod/localtest:latest + basic_teardown +} + +# This functions is used for handle the basic step in auto-update related +# tests. Including following steps: +# 1. Generate a random container name and echo it to output. +# 2. Tag the fake image before test +# 3. Start a container with io.containers.autoupdate +# 4. Generate the service file from the container +# 5. Remove the origin container +# 6. Start the container from service +function generate_service() { + local target_img_basename=$1 + local autoupdate=$2 + + # Container name. Include the autoupdate type, to make debugging easier. + # IMPORTANT: variable 'cname' is passed (out of scope) up to caller! 
+ cname=c_${autoupdate//\'/}_$(random_string) + target_img="quay.io/libpod/$target_img_basename:latest" + run_podman tag $IMAGE $target_img + if [[ -n "$autoupdate" ]]; then + label="--label io.containers.autoupdate=$autoupdate" + else + label="" + fi + run_podman run -d --name $cname $label $target_img top -d 120 + + run_podman generate systemd --new $cname + echo "$output" > "$UNIT_DIR/container-$cname.service" + echo "container-$cname" >> $SNAME_FILE + run_podman rm -f $cname + + systemctl daemon-reload + systemctl start container-$cname + systemctl status container-$cname + + # Original image ID. + # IMPORTANT: variable 'ori_image' is passed (out of scope) up to caller! + run_podman inspect --format "{{.Image}}" $cname + ori_image=$output +} + +function _wait_service_ready() { + local sname=$1 + + local timeout=6 + while [[ $timeout -gt 1 ]]; do + if systemctl -q is-active $sname; then + return + fi + sleep 1 + let timeout=$timeout-1 + done + + # Print serivce status as debug information before failed the case + systemctl status $sname + die "Timed out waiting for $sname to start" +} + +# Wait for container to update, as confirmed by its image ID changing +function _confirm_update() { + local cname=$1 + local old_iid=$2 + + # Image has already been pulled, so this shouldn't take too long + local timeout=5 + while [[ $timeout -gt 0 ]]; do + run_podman '?' inspect --format "{{.Image}}" $cname + if [[ $status != 0 ]]; then + if [[ $output =~ (no such object|does not exist in database): ]]; then + # this is ok, it just means the container is being restarted + : + else + die "podman inspect $cname failed unexpectedly" + fi + elif [[ $output != $old_iid ]]; then + return + fi + sleep 1 + done + + die "Timed out waiting for $cname to update; old IID=$old_iid" +} + +# This test can fail in dev. environment because of SELinux. +# quick fix: chcon -t container_runtime_exec_t ./bin/podman +@test "podman auto-update - label io.containers.autoupdate=image" { + generate_service alpine image + + _wait_service_ready container-$cname.service + run_podman auto-update + is "$output" "Trying to pull.*" "Image is updated." + _confirm_update $cname $ori_image +} + +@test "podman auto-update - label io.containers.autoupdate=disabled" { + generate_service alpine disabled + + _wait_service_ready container-$cname.service + run_podman auto-update + is "$output" "" "Image is not updated when autoupdate=disabled." 
+ + run_podman inspect --format "{{.Image}}" $cname + is "$output" "$ori_image" "Image ID should not change" +} + +@test "podman auto-update - label io.containers.autoupdate=fakevalue" { + fakevalue=fake_$(random_string) + generate_service alpine $fakevalue + + _wait_service_ready container-$cname.service + run_podman 125 auto-update + is "$output" ".*invalid auto-update policy.*" "invalid policy setup" + + run_podman inspect --format "{{.Image}}" $cname + is "$output" "$ori_image" "Image ID should not change" +} + +@test "podman auto-update - label io.containers.autoupdate=local" { + generate_service localtest local + podman commit --change CMD=/bin/bash $cname quay.io/libpod/localtest:latest + + _wait_service_ready container-$cname.service + run_podman auto-update + _confirm_update $cname $ori_image +} + +@test "podman auto-update with multiple services" { + # Preserve original image ID, to confirm that it changes (or not) + run_podman inspect --format "{{.Id}}" $IMAGE + local img_id="$output" + + local cnames=() + local -A expect_update + local -A will_update=([image]=1 [registry]=1 [local]=1) + + local fakevalue=fake_$(random_string) + for auto_update in image registry "" disabled "''" $fakevalue local + do + local img_base="alpine" + if [[ $auto_update == "registry" ]]; then + img_base="alpine_nginx" + elif [[ $auto_update == "local" ]]; then + img_base="localtest" + fi + generate_service $img_base $auto_update + cnames+=($cname) + if [[ $auto_update == "local" ]]; then + local_cname=$cname + fi + + if [[ -n "$auto_update" && -n "${will_update[$auto_update]}" ]]; then + expect_update[$cname]=1 + fi + done + + # Only check the last service is started. Previous services should already actived. + _wait_service_ready container-$cname.service + run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest + # Exit code is expected, due to invalid 'fakevalue' + run_podman 125 auto-update + update_log=$output + is "$update_log" ".*invalid auto-update policy.*" "invalid policy setup" + is "$update_log" ".*1 error occurred.*" "invalid policy setup" + + local n_updated=$(grep -c 'Trying to pull' <<<"$update_log") + is "$n_updated" "2" "Number of images updated from registry." + + for cname in "${!expect_update[@]}"; do + is "$update_log" ".*$cname.*" "container with auto-update policy image updated" + # Just because podman says it fetched, doesn't mean it actually updated + _confirm_update $cname $img_id + done + + # Final confirmation that all image IDs have/haven't changed + for cname in "${cnames[@]}"; do + run_podman inspect --format "{{.Image}}" $cname + if [[ -n "${expect_update[$cname]}" ]]; then + if [[ "$output" == "$img_id" ]]; then + die "$cname: image ID ($output) did not change" + fi + else + is "$output" "$img_id" "Image should not be changed." 
+ fi + done +} + +@test "podman auto-update using systemd" { + generate_service alpine image + + cat >$UNIT_DIR/podman-auto-update-$cname.timer <<EOF +[Unit] +Description=Podman auto-update testing timer + +[Timer] +OnCalendar=*-*-* *:*:0/2 +Persistent=true + +[Install] +WantedBy=timers.target +EOF + cat >$UNIT_DIR/podman-auto-update-$cname.service <<EOF +[Unit] +Description=Podman auto-update testing service +Documentation=man:podman-auto-update(1) +Wants=network.target +After=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/podman auto-update + +[Install] +WantedBy=multi-user.target default.target +EOF + + echo "podman-auto-update-$cname" >> $SNAME_FILE + systemctl enable --now podman-auto-update-$cname.timer + systemctl list-timers --all + + local expect='Finished Podman auto-update testing service' + local failed_start=failed + local count=0 + while [ $count -lt 120 ]; do + run journalctl -n 15 -u podman-auto-update-$cname.service + if [[ "$output" =~ $expect ]]; then + failed_start= + break + fi + ((count+=1)) + sleep 1 + done + + if [[ -n "$failed_start" ]]; then + die "Did not find expected string '$expect' in journalctl output for $cname" + fi + + _confirm_update $cname $ori_image +} + +# vim: filetype=sh diff --git a/test/system/450-interactive.bats b/test/system/450-interactive.bats index a9bf52ee8..a2db39492 100644 --- a/test/system/450-interactive.bats +++ b/test/system/450-interactive.bats @@ -56,8 +56,7 @@ function teardown() { stty rows $rows cols $cols <$PODMAN_TEST_PTY # ...and make sure stty under podman reads that. - # FIXME: 'sleep 1' is needed for podman-remote; without it, there's - run_podman run -it --name mystty $IMAGE sh -c 'sleep 1;stty size' <$PODMAN_TEST_PTY + run_podman run -it --name mystty $IMAGE stty size <$PODMAN_TEST_PTY is "$output" "$rows $cols" "stty under podman reads the correct dimensions" } diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats index 63b9a7c14..55ec80bb2 100644 --- a/test/system/500-networking.bats +++ b/test/system/500-networking.bats @@ -329,4 +329,62 @@ load helpers run_podman network rm -f $mynetname } +@test "podman ipv6 in /etc/resolv.conf" { + ipv6_regex='([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?' + + # Make sure to read the correct /etc/resolv.conf file in case of systemd-resolved. + resolve_file=$(readlink -f /etc/resolv.conf) + if [[ "$resolve_file" == "/run/systemd/resolve/stub-resolv.conf" ]]; then + resolve_file="/run/systemd/resolve/resolv.conf" + fi + + # If the host doesn't have an ipv6 in resolv.conf skip this test. + # We should never modify resolv.conf on the host. + if ! grep -E "$ipv6_regex" "$resolve_file"; then + skip "This test needs an ipv6 nameserver in $resolve_file" + fi + + # ipv4 slirp + run_podman run --rm --network slirp4netns:enable_ipv6=false $IMAGE cat /etc/resolv.conf + if grep -E "$ipv6_regex" <<< $output; then + die "resolv.conf contains a ipv6 nameserver" + fi + + # ipv6 slirp + run_podman run --rm --network slirp4netns:enable_ipv6=true $IMAGE cat /etc/resolv.conf + # "is" does not like the ipv6 regex + if ! 
grep -E "$ipv6_regex" <<< $output; then + die "resolv.conf does not contain a ipv6 nameserver" + fi + + # ipv4 cni + local mysubnet=$(random_rfc1918_subnet) + local netname=testnet-$(random_string 10) + + run_podman network create --subnet $mysubnet.0/24 $netname + is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'" + + run_podman run --rm --network $netname $IMAGE cat /etc/resolv.conf + if grep -E "$ipv6_regex" <<< $output; then + die "resolv.conf contains a ipv6 nameserver" + fi + + run_podman network rm -f $netname + + # ipv6 cni + mysubnet=fd00:4:4:4:4::/64 + netname=testnet-$(random_string 10) + + run_podman network create --subnet $mysubnet $netname + is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'" + + run_podman run --rm --network $netname $IMAGE cat /etc/resolv.conf + # "is" does not like the ipv6 regex + if ! grep -E "$ipv6_regex" <<< $output; then + die "resolv.conf does not contain a ipv6 nameserver" + fi + + run_podman network rm -f $netname +} + # vim: filetype=sh |