#!/usr/bin/env bats

load helpers

# This is a long ugly way to clean up pods and remove the pause image
function teardown() {
    run_podman pod rm -f -t 0 -a
    run_podman rm -f -t 0 -a
    run_podman rmi --ignore $(pause_image)
    basic_teardown
}

@test "podman pod - basic tests" {
    run_podman pod list --noheading
    is "$output" "" "baseline: empty results from list --noheading"

    run_podman pod ls --noheading
    is "$output" "" "baseline: empty results from ls --noheading"

    run_podman pod ps --noheading
    is "$output" "" "baseline: empty results from ps --noheading"
}

@test "podman pod top - containers in different PID namespaces" {
    # With infra=false, we don't get a /pause container
    no_infra='--infra=false'
    run_podman pod create $no_infra
    podid="$output"

    # Start two containers...
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid1="$output"
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid2="$output"

    # ...and wait for them to actually start.
    wait_for_output "PID \+PPID \+USER " $cid1
    wait_for_output "PID \+PPID \+USER " $cid2

    # Both containers have emitted at least one top-like line.
    # Now run 'pod top', and expect two 'top -d 2' processes running.
    run_podman pod top $podid
    is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers"

    # By default (podman pod create w/ default --infra) there should be
    # a /pause container.
    if [ -z "$no_infra" ]; then
        is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container"
    fi

    # Clean up
    run_podman --noout pod rm -f -t 0 $podid
    is "$output" "" "output should be empty"
}

@test "podman pod create - custom infra image" {
    skip_if_remote "CONTAINERS_CONF only affects the server side"
    image="i.do/not/exist:image"
    tmpdir=$PODMAN_TMPDIR/pod-test
    mkdir -p $tmpdir
    containersconf=$tmpdir/containers.conf
    cat >$containersconf <$port_in/tcp $c_name" \
       "output of 'podman ps'"

    # send a random string to the container. This will cause the container
    # to output the string to its logs, then exit.
    teststring=$(random_string 30)
    echo "$teststring" | nc 127.0.0.1 $port_out

    # Confirm that the container log output is the string we sent it.
    run_podman logs $cid
    is "$output" "$teststring" "test string received on container"

    # Finally, confirm the infra-container and -command. We run this late,
    # not at pod creation, to give the infra container time to start & log.
    run_podman logs $infra_cid
    is "$output" "Confirmed: testimage pause invoked as $infra_command" \
       "pod ran with our desired infra container + command"

    # Clean up
    run_podman rm $cid
    run_podman pod rm -t 0 -f mypod
    run_podman rmi $infra_image
}

@test "podman pod create should fail when infra-name is already in use" {
    local infra_name="infra_container_$(random_string 10 | tr A-Z a-z)"
    local infra_image="k8s.gcr.io/pause:3.5"
    local pod_name="$(random_string 10 | tr A-Z a-z)"

    run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "$infra_image"
    is "$output" "" "output from pod create should be empty"

    run_podman 125 pod create --infra-name "$infra_name"
    assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name.: that name is already in use" \
       "Trying to create two pods with same infra-name"

    run_podman pod rm -f $pod_name
    run_podman rmi $infra_image
}

@test "podman pod create --share" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman 125 pod create --share bogus --name $pod_name
    is "$output" ".*Invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \
       "pod test for bogus --share option"

    run_podman pod create --share ipc --name $pod_name
    run_podman run --rm --pod $pod_name --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work with non share UTS namespace"
}

@test "podman pod create --pod new:$POD --hostname" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman run --rm --pod "new:$pod_name" --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work when creating a new:pod"
    run_podman pod rm $pod_name

    run_podman run --rm --pod "new:$pod_name" $IMAGE hostname
    is "$output" "$pod_name" "new:POD should have hostname set to the pod name"
}

@test "podman rm --force to remove infra container" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman create --pod "new:$pod_name" $IMAGE
    container_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    run_podman 125 container rm $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"

    run_podman 125 container rm --force $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"

    run_podman container rm --depend $infra_ID
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_ID.*"

    # Now make sure that --force --all works as well
    run_podman create --pod "new:$pod_name" $IMAGE
    container_1_ID="$output"
    run_podman create --pod "$pod_name" $IMAGE
    container_2_ID="$output"
    run_podman create $IMAGE
    container_3_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    run_podman container rm --force --all
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_1_ID.*"
    is "$output" ".*$container_2_ID.*"
    is "$output" ".*$container_3_ID.*"
}

@test "podman pod create share net" {
    run_podman pod create --name test
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Default network sharing should be false"
    run_podman pod rm test

    run_podman pod create --name test --share ipc --network private
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only ipc should be false"
    run_podman pod rm test

    run_podman pod create --name test --share net --network private
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only net should be false"
    run_podman pod rm test

    run_podman pod create --name test --share net --network host
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only net should be true"
    run_podman pod rm test

    run_podman pod create --name test --share ipc --network host
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only ipc should be true"
    run_podman pod rm test
}
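
# The exit-policy tests below poll pod state via _ensure_pod_state, which is
# not defined in this excerpt and is normally supplied by the test helpers.
# If your helpers.bash does not already provide it, a minimal sketch --
# assuming the run_podman/die helpers loaded above and the pod-inspect
# "{{.State}}" format those tests already use -- could look like this:
if ! declare -F _ensure_pod_state >/dev/null; then
    # Wait (up to ~5 seconds) for the pod (arg 1) to reach the state (arg 2).
    function _ensure_pod_state() {
        local pod=$1 state=$2
        local i
        for i in $(seq 10); do
            run_podman pod inspect $pod --format "{{.State}}"
            if [[ "$output" == "$state" ]]; then
                return 0
            fi
            sleep 0.5
        done
        die "Timed out waiting for pod $pod to reach state $state"
    }
fi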
@test "pod exit policies" {
    # Test setting exit policies
    run_podman pod create
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "continue" "default exit policy"
    run_podman pod rm $podID

    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    run_podman pod rm $podID

    run_podman 125 pod create --exit-policy invalid
    is "$output" "Error: .*error running pod create option: invalid pod exit policy: \"invalid\"" "invalid exit policy"

    # Test exit-policy behaviour
    run_podman pod create --exit-policy continue
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Degraded
    run_podman pod rm $podID

    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Exited
    run_podman pod rm $podID
}

@test "pod exit policies - play kube" {
    # play-kube sets the exit policy to "stop"
    local name="$(random_string 10 | tr A-Z a-z)"

    kubeFile="apiVersion: v1
kind: Pod
metadata:
  name: $name-pod
spec:
  containers:
  - command:
    - \"true\"
    image: $IMAGE
    name: ctr
  restartPolicy: OnFailure"

    echo "$kubeFile" > $PODMAN_TMPDIR/test.yaml
    run_podman play kube $PODMAN_TMPDIR/test.yaml

    run_podman pod inspect $name-pod --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    _ensure_pod_state $name-pod Exited

    run_podman pod rm $name-pod
}

# vim: filetype=sh
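
# teardown() at the top removes $(pause_image), a helper that normally comes
# from helpers.bash and is not shown in this file. If it is missing, a rough
# fallback -- assuming podman's convention of tagging its locally built infra
# image as localhost/podman-pause:<server version>-<build timestamp> -- might be:
if ! declare -F pause_image >/dev/null; then
    function pause_image() {
        # Derive the expected local pause-image tag from the server's
        # version and build stamp.
        podman version --format "localhost/podman-pause:{{.Server.Version}}-{{.Server.Built}}"
    }
fi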