#!/usr/bin/env bats

load helpers

# This is a long ugly way to clean up pods and remove the pause image
function teardown() {
    run_podman pod rm -f -t 0 -a
    run_podman rm -f -t 0 -a
    run_podman rmi --ignore $(pause_image)
    basic_teardown
}

@test "podman pod - basic tests" {
    run_podman pod list --noheading
    is "$output" "" "baseline: empty results from list --noheading"

    run_podman pod ls --noheading
    is "$output" "" "baseline: empty results from ls --noheading"

    run_podman pod ps --noheading
    is "$output" "" "baseline: empty results from ps --noheading"
}

@test "podman pod top - containers in different PID namespaces" {
    # With infra=false, we don't get a /pause container
    no_infra='--infra=false'
    run_podman pod create $no_infra
    podid="$output"

    # Start two containers...
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid1="$output"
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid2="$output"

    # ...and wait for them to actually start.
    wait_for_output "PID \+PPID \+USER " $cid1
    wait_for_output "PID \+PPID \+USER " $cid2

    # Both containers have emitted at least one top-like line.
    # Now run 'pod top', and expect two 'top -d 2' processes running.
    run_podman pod top $podid
    is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers"

    # By default (podman pod create w/ default --infra) there should be
    # a /pause container.
    if [ -z "$no_infra" ]; then
        is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container"
    fi

    # Clean up
    run_podman --noout pod rm -f -t 0 $podid
    is "$output" "" "output should be empty"
}

@test "podman pod create - custom infra image" {
    skip_if_remote "CONTAINERS_CONF only affects server side"
    image="i.do/not/exist:image"
    tmpdir=$PODMAN_TMPDIR/pod-test
    mkdir -p $tmpdir
    containersconf=$tmpdir/containers.conf
    cat >$containersconf <<EOF
[engine]
infra_image="$image"
EOF

    # The infra image does not exist, so pod creation must fail when it
    # tries to pull it -- via an explicit option as well as via containers.conf
    run_podman 125 pod create --infra-image $image
    is "$output" ".*initializing source docker://$image:.*"

    CONTAINERS_CONF=$containersconf run_podman 125 pod create
    is "$output" ".*initializing source docker://$image:.*"
}

@test "podman pod create - custom infra image, command, and published port" {
    # Randomly-assigned ports in the 5xxx and 6xxx ranges
    port_in=$(random_free_port 5000-5999)
    port_out=$(random_free_port 6000-6999)

    # Build a custom infra image: a hard link to the testimage's 'pause'
    # script under a random name, so --infra-command can invoke it. The
    # bogus entrypoint confirms that --infra-command overrides it.
    local infra_image="infra_$(random_string 10 | tr A-Z a-z)"
    local infra_command="/pause_$(random_string 10)"
    run_podman build -t $infra_image - <<EOF
FROM $IMAGE
RUN ln /home/podman/pause $infra_command
ENTRYPOINT ["/original-entrypoint-should-be-overridden"]
EOF

    run_podman pod create --name=mypod            \
               --publish "$port_out:$port_in"     \
               --infra-image   "$infra_image"     \
               --infra-command "$infra_command"

    run_podman pod inspect --format "{{.InfraContainerID}}" mypod
    infra_cid="$output"

    # Start a container that listens on the published port and logs
    # whatever it receives.
    local c_name=$(random_string 15)
    run_podman run -d --pod mypod --name $c_name $IMAGE nc -l -p $port_in
    cid="$output"

    # The published port mapping must show up in 'podman ps'
    run_podman ps --format '{{.Ports}} {{.Names}}' --filter name=$c_name
    is "$output" "0.0.0.0:$port_out->$port_in/tcp $c_name" \
       "output of 'podman ps'"

    # send a random string to the container. This will cause the container
    # to output the string to its logs, then exit.
    teststring=$(random_string 30)
    echo "$teststring" | nc 127.0.0.1 $port_out

    # Confirm that the container log output is the string we sent it.
    run_podman logs $cid
    is "$output" "$teststring" "test string received on container"

    # Finally, confirm the infra-container and -command. We run this late,
    # not at pod creation, to give the infra container time to start & log.
    run_podman logs $infra_cid
    is "$output" "Confirmed: testimage pause invoked as $infra_command" \
       "pod ran with our desired infra container + command"

    # Clean up
    run_podman rm $cid
    run_podman pod rm -t 0 -f mypod
    run_podman rmi $infra_image
}

@test "podman pod create should fail when infra-name is already in use" {
    local infra_name="infra_container_$(random_string 10 | tr A-Z a-z)"
    local infra_image="k8s.gcr.io/pause:3.5"
    local pod_name="$(random_string 10 | tr A-Z a-z)"

    run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "$infra_image"
    is "$output" "" "output from pod create should be empty"

    run_podman 125 pod create --infra-name "$infra_name"
    assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name.: that name is already in use" \
           "Trying to create two pods with same infra-name"

    run_podman pod rm -f $pod_name
    run_podman rmi $infra_image
}

@test "podman pod create --share" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman 125 pod create --share bogus --name $pod_name
    is "$output" ".*Invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \
       "pod test for bogus --share option"
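
    # Sharing only IPC leaves the UTS namespace unshared, so a container
    # in the pod can still set its own hostname (verified below).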
    run_podman pod create --share ipc --name $pod_name
    run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
    is "$output" "[ipc]"

    run_podman run --rm --pod $pod_name --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work with unshared UTS namespace"

    run_podman pod create --share +pid --replace --name $pod_name
    run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
    for ns in uts pid ipc net; do
        is "$output" ".*$ns"
    done
}

@test "podman pod create --pod new:$POD --hostname" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman run --rm --pod "new:$pod_name" --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work when creating a new:pod"
    run_podman pod rm $pod_name

    run_podman run --rm --pod "new:$pod_name" $IMAGE hostname
    is "$output" "$pod_name" "new:POD should have its hostname set to the pod name"
}

@test "podman rm --force to remove infra container" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman create --pod "new:$pod_name" $IMAGE
    container_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    run_podman 125 container rm $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"
    run_podman 125 container rm --force $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"

    run_podman container rm --depend $infra_ID
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_ID.*"

    # Now make sure that --force --all works as well
    run_podman create --pod "new:$pod_name" $IMAGE
    container_1_ID="$output"
    run_podman create --pod "$pod_name" $IMAGE
    container_2_ID="$output"
    run_podman create $IMAGE
    container_3_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    run_podman container rm --force --all
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_1_ID.*"
    is "$output" ".*$container_2_ID.*"
    is "$output" ".*$container_3_ID.*"
}

@test "podman pod create share net" {
    run_podman pod create --name test
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Default network sharing should be false"
    run_podman pod rm test

    run_podman pod create --share ipc --network private test
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only ipc should be false"
    run_podman pod rm test

    local name="$(random_string 10 | tr A-Z a-z)"
    run_podman pod create --name $name --share net --network private
    run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only net should be false"

    run_podman pod create --share net --network host --replace $name
    run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only net should be true"
    run_podman pod rm $name

    run_podman pod create --name test --share ipc --network host
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only ipc should be true"
    run_podman pod rm test
}

@test "pod exit policies" {
    # Test setting exit policies
    run_podman pod create
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "continue" "default exit policy"
    run_podman pod rm $podID

    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    run_podman pod rm $podID

    run_podman 125 pod create --exit-policy invalid
    is "$output" "Error: .*error running pod create option: invalid pod exit policy: \"invalid\"" "invalid exit policy"

    # Test exit-policy behaviour
    run_podman pod create --exit-policy continue
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Degraded
    run_podman pod rm $podID

    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Exited
    run_podman pod rm $podID
}

@test "pod exit policies - play kube" {
    # play-kube sets the exit policy to "stop"
    local name="$(random_string 10 | tr A-Z a-z)"

    kubeFile="apiVersion: v1
kind: Pod
metadata:
  name: $name-pod
spec:
  containers:
  - command:
    - \"true\"
    image: $IMAGE
    name: ctr
  restartPolicy: OnFailure"

    echo "$kubeFile" > $PODMAN_TMPDIR/test.yaml
    run_podman play kube $PODMAN_TMPDIR/test.yaml

    run_podman pod inspect $name-pod --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    _ensure_pod_state $name-pod Exited

    run_podman pod rm $name-pod
}

@test "pod resource limits" {
    skip_if_remote "resource limits only implemented on non-remote"
    if is_rootless; then
        skip "only meaningful for rootful"
    fi

    local name1="resources1"
    run_podman --cgroup-manager=systemd pod create --name=$name1 --cpus=5 --memory=10m
    run_podman --cgroup-manager=systemd pod start $name1
    run_podman pod inspect --format '{{.CgroupPath}}' $name1
    local path1="$output"
    local actual1=$(< /sys/fs/cgroup/$path1/cpu.max)
    is "$actual1" "500000 100000" "resource limits set properly"
    local actual2=$(< /sys/fs/cgroup/$path1/memory.max)
    is "$actual2" "10485760" "resource limits set properly"
    run_podman --cgroup-manager=systemd pod rm -f $name1

    local name2="resources2"
    run_podman --cgroup-manager=cgroupfs pod create --cpus=5 --memory=10m --name=$name2
    run_podman --cgroup-manager=cgroupfs pod start $name2
    run_podman pod inspect --format '{{.CgroupPath}}' $name2
    local path2="$output"
    local actual2=$(< /sys/fs/cgroup/$path2/cpu.max)
    is "$actual2" "500000 100000" "resource limits set properly"
    local actual2=$(< /sys/fs/cgroup/$path2/memory.max)
    is "$actual2" "10485760" "resource limits set properly"
    run_podman --cgroup-manager=cgroupfs pod rm $name2
}

@test "podman pod ps doesn't race with pod rm" {
    # create a few pods
    for i in {0..10}; do
        run_podman pod create
    done

    # and delete them
    $PODMAN pod rm -a &

    # pod ps should not fail while pods are deleted
    run_podman pod ps -q

    # wait for pod rm -a
    wait
}

# vim: filetype=sh