Diffstat (limited to 'test/system')
-rw-r--r--   test/system/250-systemd.bats       29
-rw-r--r--   test/system/255-auto-update.bats    97
-rw-r--r--   test/system/helpers.systemd.bash    14
3 files changed, 123 insertions, 17 deletions
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index 8f4471f91..dd1a0f05a 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -301,24 +301,16 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
 }
 
 @test "podman-kube@.service template" {
-    # If running from a podman source directory, build and use the source
-    # version of the play-kube-@ unit file
-    unit_name="podman-kube@.service"
-    unit_file="contrib/systemd/system/${unit_name}"
-    if [[ -e ${unit_file}.in ]]; then
-        echo "# [Building & using $unit_name from source]" >&3
-        # Force regenerating unit file (existing one may have /usr/bin path)
-        rm -f $unit_file
-        BINDIR=$(dirname $PODMAN) make $unit_file
-        cp $unit_file $UNIT_DIR/$unit_name
-    fi
-
+    install_kube_template
     # Create the YAMl file
     yaml_source="$PODMAN_TMPDIR/test.yaml"
     cat >$yaml_source <<EOF
 apiVersion: v1
 kind: Pod
 metadata:
+  annotations:
+    io.containers.autoupdate: "local"
+    io.containers.autoupdate/b: "registry"
   labels:
     app: test
   name: test_pod
@@ -327,8 +319,11 @@ spec:
     - command:
       - top
       image: $IMAGE
-      name: test
-      resources: {}
+      name: a
+    - command:
+      - top
+      image: $IMAGE
+      name: b
 EOF
 
     # Dispatch the YAML file
@@ -349,6 +344,12 @@ EOF
     run_podman 125 container rm $service_container
     is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
 
+    # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
+    # with 255-auto-update.bats
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,local.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"
+
     # Kill the pod and make sure the service is not running.
     # The restart policy is set to "never" since there is no
     # design yet for propagating exit codes up to the service
diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats
index c6f9600b6..1f350e87f 100644
--- a/test/system/255-auto-update.bats
+++ b/test/system/255-auto-update.bats
@@ -266,8 +266,6 @@ EOF
 
     # Generate a healthy image that will run correctly.
     run_podman build -t quay.io/libpod/$image -f $dockerfile1
-    podman image inspect --format "{{.ID}}" $image
-    oldID="$output"
 
     generate_service $image local /runme --sdnotify=container noTag
     _wait_service_ready container-$cname.service
@@ -277,7 +275,7 @@ EOF
 
     # Generate an unhealthy image that will fail.
     run_podman build -t quay.io/libpod/$image -f $dockerfile2
-    podman image inspect --format "{{.ID}}" $image
+    run_podman image inspect --format "{{.ID}}" $image
     newID="$output"
 
     run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
@@ -409,4 +407,97 @@ EOF
     _confirm_update $cname $ori_image
 }
 
+@test "podman-kube@.service template with rollback" {
+    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
+    # assume that we work only with crun, nothing else.
+    # [copied from 260-sdnotify.bats]
+    runtime=$(podman_runtime)
+    if [[ "$runtime" != "crun" ]]; then
+        skip "this test only works with crun, not $runtime"
+    fi
+
+    install_kube_template
+
+    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
+    cat >$dockerfile1 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
+trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
+>> /runme
+RUN chmod +x /runme
+EOF
+
+    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
+    cat >$dockerfile2 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+exit 1" >> /runme
+RUN chmod +x /runme
+EOF
+    local_image=localhost/image:$(random_string 10)
+
+    # Generate a healthy image that will run correctly.
+    run_podman build -t $local_image -f $dockerfile1
+    run_podman image inspect --format "{{.ID}}" $local_image
+    oldID="$output"
+
+    # Create the YAMl file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    io.containers.autoupdate: "registry"
+    io.containers.autoupdate/b: "local"
+    io.containers.sdnotify/b: "container"
+  labels:
+    app: test
+  name: test_pod
+spec:
+  containers:
+    - command:
+      - top
+      image: $IMAGE
+      name: a
+    - command:
+      - /runme
+      image: $local_image
+      name: b
+EOF
+
+    # Dispatch the YAML file
+    service_name="podman-kube@$(systemd-escape $yaml_source).service"
+    systemctl start $service_name
+    systemctl is-active $service_name
+
+    # Make sure the containers are properly configured
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,false,local.*" "container-specified auto-update policy gets applied"
+
+    # Generate a broken image that will fail.
+    run_podman build -t $local_image -f $dockerfile2
+    run_podman image inspect --format "{{.ID}}" $local_image
+    newID="$output"
+
+    assert "$oldID" != "$newID" "broken image really is a new one"
+
+    # Make sure container b sees the new image
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,pending,local.*" "container b sees the new image"
+
+    # Now update and check for the rollback
+    run_podman auto-update --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,rolled back,registry.*" "container a was rolled back as the update of b failed"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,rolled back,local.*" "container b was rolled back as its update has failed"
+
+    # Clean up
+    systemctl stop $service_name
+    run_podman rmi -f $(pause_image) $local_image $newID $oldID
+    rm -f $UNIT_DIR/$unit_name
+}
+
 # vim: filetype=sh
diff --git a/test/system/helpers.systemd.bash b/test/system/helpers.systemd.bash
index d9abc087d..afbab6e08 100644
--- a/test/system/helpers.systemd.bash
+++ b/test/system/helpers.systemd.bash
@@ -32,3 +32,17 @@ journalctl() {
 systemd-run() {
     command systemd-run $_DASHUSER "$@";
 }
+
+install_kube_template() {
+    # If running from a podman source directory, build and use the source
+    # version of the play-kube-@ unit file
+    unit_name="podman-kube@.service"
+    unit_file="contrib/systemd/system/${unit_name}"
+    if [[ -e ${unit_file}.in ]]; then
+        echo "# [Building & using $unit_name from source]" >&3
+        # Force regenerating unit file (existing one may have /usr/bin path)
+        rm -f $unit_file
+        BINDIR=$(dirname $PODMAN) make $unit_file
+        cp $unit_file $UNIT_DIR/$unit_name
+    fi
+}
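The manual workflow these tests automate looks roughly like the sketch below. The YAML path is a placeholder, and rootless setups typically need systemctl --user; only the commands shown in the tests above are guaranteed, everything else here is an assumption.

    # Start a pod from a Kubernetes YAML file via the podman-kube@.service template
    yaml=/path/to/test.yaml
    systemctl --user start "podman-kube@$(systemd-escape $yaml).service"
    systemctl --user is-active "podman-kube@$(systemd-escape $yaml).service"

    # Preview which containers would be updated and under which policy;
    # policies come from the io.containers.autoupdate annotations in the YAML
    podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"

    # Apply the updates; if the restarted unit fails, it is rolled back to the previous image
    podman auto-update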