Diffstat (limited to 'test/system')
-rw-r--r--   test/system/170-run-userns.bats   | 13
-rw-r--r--   test/system/250-systemd.bats      | 76
-rw-r--r--   test/system/255-auto-update.bats  | 12
-rw-r--r--   test/system/260-sdnotify.bats     | 48
-rw-r--r--   test/system/700-play.bats         | 34
-rw-r--r--   test/system/helpers.bash          | 13
6 files changed, 181 insertions, 15 deletions
diff --git a/test/system/170-run-userns.bats b/test/system/170-run-userns.bats
index d754306b2..b80351902 100644
--- a/test/system/170-run-userns.bats
+++ b/test/system/170-run-userns.bats
@@ -36,6 +36,19 @@ function _require_crun() {
     is "$output" ".*457" "Check group leaked into container"
 }
 
+@test "rootful pod with custom ID mapping" {
+    skip_if_rootless "does not work rootless - rootful feature"
+    skip_if_remote "remote --uidmap is broken (see #14233)"
+    random_pod_name=$(random_string 30)
+    run_podman pod create --uidmap 0:200000:5000 --name=$random_pod_name
+    run_podman pod start $random_pod_name
+
+    # Remove the pod and the pause image
+    run_podman pod rm $random_pod_name
+    run_podman version --format "{{.Server.Version}}-{{.Server.Built}}"
+    run_podman rmi -f localhost/podman-pause:$output
+}
+
 @test "podman --remote --group-add keep-groups " {
     if is_remote; then
         run_podman 125 run --rm --group-add keep-groups $IMAGE id
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index d0da654ad..567fa89c1 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -292,4 +292,80 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
     run_podman network rm -f $netname
 }
 
+@test "podman-play-kube@.service template" {
+    skip_if_remote "systemd units do not work with remote clients"
+
+    # If running from a podman source directory, build and use the source
+    # version of the play-kube-@ unit file
+    unit_name="podman-play-kube@.service"
+    unit_file="contrib/systemd/system/${unit_name}"
+    if [[ -e ${unit_file}.in ]]; then
+        echo "# [Building & using $unit_name from source]" >&3
+        BINDIR=$(dirname $PODMAN) make $unit_file
+        cp $unit_file $UNIT_DIR/$unit_name
+    fi
+
+    # Create the YAML file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: test
+  name: test_pod
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: test
+    resources: {}
+EOF
+
+    # Dispatch the YAML file
+    service_name="podman-play-kube@$(systemd-escape $yaml_source).service"
+    systemctl start $service_name
+    systemctl is-active $service_name
+
+    # The name of the service container is predictable: the first 12 characters
+    # of the hash of the YAML file followed by the "-service" suffix
+    yaml_sha=$(sha256sum $yaml_source)
+    service_container="${yaml_sha:0:12}-service"
+
+    # Make sure that the service container exists and runs.
+    run_podman container inspect $service_container --format "{{.State.Running}}"
+    is "$output" "true"
+
+    # Check for an error when trying to remove the service container
+    run_podman 125 container rm $service_container
+    is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
+
+    # Kill the pod and make sure the service is not running.
+    # The restart policy is set to "never" since there is no
+    # design yet for propagating exit codes up to the service
+    # container.
+    run_podman pod kill test_pod
+    for i in {0..5}; do
+        run systemctl is-failed $service_name
+        if [[ $output == "failed" ]]; then
+            break
+        fi
+        sleep 0.5
+    done
+    is "$output" "failed" "systemd service transitioned to 'failed' state"
+
+    # Now stop and start the service again.
+    systemctl stop $service_name
+    systemctl start $service_name
+    systemctl is-active $service_name
+    run_podman container inspect $service_container --format "{{.State.Running}}"
+    is "$output" "true"
+
+    # Clean up
+    systemctl stop $service_name
+    run_podman 1 container exists $service_container
+    run_podman 1 pod exists test_pod
+}
+
 # vim: filetype=sh
diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats
index 6cdae2ada..6cee939fb 100644
--- a/test/system/255-auto-update.bats
+++ b/test/system/255-auto-update.bats
@@ -135,15 +135,27 @@ function _confirm_update() {
 # This test can fail in dev. environment because of SELinux.
 # quick fix: chcon -t container_runtime_exec_t ./bin/podman
 @test "podman auto-update - label io.containers.autoupdate=image" {
+    since=$(date --iso-8601=seconds)
+    run_podman auto-update
+    is "$output" ""
+    run_podman events --filter type=system --since $since --stream=false
+    is "$output" ""
+
     generate_service alpine image
 
     _wait_service_ready container-$cname.service
+    since=$(date --iso-8601=seconds)
     run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
     is "$output" ".*container-$cname.service,quay.io/libpod/alpine:latest,pending,registry.*" "Image update is pending."
+    run_podman events --filter type=system --since $since --stream=false
+    is "$output" ".* system auto-update"
 
+    since=$(date --iso-8601=seconds)
     run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
     is "$output" "Trying to pull.*" "Image is updated."
     is "$output" ".*container-$cname.service,quay.io/libpod/alpine:latest,true,registry.*" "Image is updated."
+    run_podman events --filter type=system --since $since --stream=false
+    is "$output" ".* system auto-update"
 
     _confirm_update $cname $ori_image
 }
diff --git a/test/system/260-sdnotify.bats b/test/system/260-sdnotify.bats
index 88d84c86f..59456de24 100644
--- a/test/system/260-sdnotify.bats
+++ b/test/system/260-sdnotify.bats
@@ -172,4 +172,52 @@ READY=1" "sdnotify sent MAINPID and READY"
     _stop_socat
 }
 
+@test "sdnotify : play kube" {
+    # Create the YAML file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: test
+  name: test_pod
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: test
+    resources: {}
+EOF
+
+    # The name of the service container is predictable: the first 12 characters
+    # of the hash of the YAML file followed by the "-service" suffix
+    yaml_sha=$(sha256sum $yaml_source)
+    service_container="${yaml_sha:0:12}-service"
+
+
+    export NOTIFY_SOCKET=$PODMAN_TMPDIR/conmon.sock
+    _start_socat
+
+    run_podman play kube --service-container=true $yaml_source
+    run_podman container inspect $service_container --format "{{.State.ConmonPid}}"
+    mainPID="$output"
+    # The 'echo's help us debug failed runs
+    run cat $_SOCAT_LOG
+    echo "socat log:"
+    echo "$output"
+
+    is "$output" "MAINPID=$mainPID
+READY=1" "sdnotify sent MAINPID and READY"
+
+    _stop_socat
+
+    # Clean up pod and pause image
+    run_podman play kube --down $PODMAN_TMPDIR/test.yaml
+    run_podman version --format "{{.Server.Version}}-{{.Server.Built}}"
+    podman rmi -f localhost/podman-pause:$output
+}
+
+
 # vim: filetype=sh
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index 2e5327a85..6c2a8c8b1 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -103,10 +103,9 @@ RELABEL="system_u:object_r:container_file_t:s0"
 
 @test "podman play --service-container" {
     skip_if_remote "service containers only work locally"
-    TESTDIR=$PODMAN_TMPDIR/testdir
-    mkdir -p $TESTDIR
-
-yaml="
+    # Create the YAML file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
 apiVersion: v1
 kind: Pod
 metadata:
@@ -120,13 +119,16 @@ spec:
     image: $IMAGE
     name: test
     resources: {}
-"
+EOF
+    run_podman play kube --service-container=true $yaml_source
 
-    echo "$yaml" > $PODMAN_TMPDIR/test.yaml
-    run_podman play kube --service-container=true $PODMAN_TMPDIR/test.yaml
+    # The name of the service container is predictable: the first 12 characters
+    # of the hash of the YAML file followed by the "-service" suffix
+    yaml_sha=$(sha256sum $yaml_source)
+    service_container="${yaml_sha:0:12}-service"
 
     # Make sure that the service container exists and runs.
-    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    run_podman container inspect $service_container --format "{{.State.Running}}"
     is "$output" "true"
 
     # Stop the *main* container and make sure that
@@ -135,24 +137,26 @@ spec:
     # #) The service container is marked as an service container
     run_podman stop test_pod-test
     _ensure_pod_state test_pod Exited
-    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
-    is "$output" "false"
-    run_podman container inspect "352a88685060-service" --format "{{.IsService}}"
+    _ensure_container_running $service_container false
+    run_podman container inspect $service_container --format "{{.IsService}}"
     is "$output" "true"
 
     # Restart the pod, make sure the service is running again
     run_podman pod restart test_pod
-    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    run_podman container inspect $service_container --format "{{.State.Running}}"
     is "$output" "true"
 
+    # Check for an error when trying to remove the service container
+    run_podman 125 container rm $service_container
+    is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
+
     # Kill the pod and make sure the service is not running
     run_podman pod kill test_pod
-    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
-    is "$output" "false"
+    _ensure_container_running $service_container false
 
     # Remove the pod and make sure the service is removed along with it
     run_podman pod rm test_pod
-    run_podman 1 container exists "352a88685060-service"
+    run_podman 1 container exists $service_container
 }
 
 @test "podman play --network" {
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 072131202..6868f2691 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -405,6 +405,19 @@ function _ensure_pod_state() {
     is "$output" "$2" "unexpected pod state"
 }
 
+# Wait for the container's (1st arg) running state (2nd arg)
+function _ensure_container_running() {
+    for i in {0..5}; do
+        run_podman container inspect $1 --format "{{.State.Running}}"
+        if [[ $output == "$2" ]]; then
+            break
+        fi
+        sleep 0.5
+    done
+
+    is "$output" "$2" "unexpected container state"
+}
+
 ###########################
 #  _add_label_if_missing  #  make sure skip messages include rootless/remote
 ###########################
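
A note on the service-container name that the new tests compute: "podman play kube" derives it from the SHA-256 hash of the YAML source, which is why the tests no longer hard-code an ID such as "352a88685060-service". The lines below are a minimal sketch of that derivation; the path is an example and not taken from the diff:

    yaml_source=/tmp/test.yaml                    # example path (assumption)
    yaml_sha=$(sha256sum $yaml_source)            # prints "<64 hex chars>  /tmp/test.yaml"
    service_container="${yaml_sha:0:12}-service"  # first 12 characters of the hash + "-service"
    echo "$service_container"

Because sha256sum prints the hash before the file name, taking the first 12 characters of the whole output is the same as taking the first 12 characters of the hash, which is exactly what the tests rely on.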
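
The podman-play-kube@.service test exercises a systemd template unit: the escaped YAML path becomes the instance name, and starting that instance runs "podman play kube" on the file. The following is a rough sketch of the dispatch, assuming the template unit has already been installed into $UNIT_DIR; the ExecStart of the shipped unit is not part of this diff:

    yaml_source=$PODMAN_TMPDIR/test.yaml
    # systemd-escape turns the path into a string that is legal inside a unit
    # instance name; the template can recover the original path through the
    # unescaped-instance specifier (%I).
    service_name="podman-play-kube@$(systemd-escape $yaml_source).service"
    systemctl start $service_name
    systemctl is-active $service_name
    systemctl stop $service_name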