| author | Valentin Rothberg <rothberg@redhat.com> | 2021-07-28 15:19:04 +0200 |
|---|---|---|
| committer | Valentin Rothberg <rothberg@redhat.com> | 2021-08-05 15:20:38 +0200 |
| commit | 30df551bde460f4f37d6dbd373701873fa5353dc | |
| tree | a1a48d3b25706006ac391ac271ed21c3a6191c8f /test/system | |
| parent | 117583c293713f2baa920c4035e820ad59fe6622 | |
auto-update: simple rollback
Add support for simple rollbacks during `podman auto-update`. Rollbacks
are enabled by default. If a systemd unit cannot be restarted after an
update, the previous image will be retagged and the unit will be
restarted a second time.
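For orientation, the rollback described above is roughly equivalent to the following manual steps. This is only an illustrative sketch, not the actual implementation: `$unit` and `$previous_image_id` are placeholder names, and `podman auto-update` drives the restarts through systemd internally.

```sh
# Illustrative approximation of the rollback path (not the real code);
# $unit and $previous_image_id are placeholders.
podman pull quay.io/libpod/my-app:latest                        # auto-update pulled a new image
systemctl --user restart "$unit"                                # restarting on the new image fails
podman tag "$previous_image_id" quay.io/libpod/my-app:latest    # previous image is retagged
systemctl --user restart "$unit"                                # unit is restarted a second time
```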
Add system tests for rollbacks. Also fix a bug in the restart sequence;
we have to use the channel to actually know whether the restart was
successful or not.
NOTE: To make rollbacks really useful, users must run their containers
with `--sdnotify=container` such that the containers send the ready
message over the (mounted) socket. This way, restarting the systemd
units during auto-update will block until the message has been received
(or a timeout kicks in).
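A minimal setup along these lines might look as follows; the container and image names and the rootless unit path are illustrative, not taken from the commit:

```sh
# Create a container that opts into registry auto-updates and reports
# readiness itself over the mounted NOTIFY_SOCKET (names are illustrative).
podman create --name my-app \
    --label io.containers.autoupdate=registry \
    --sdnotify=container \
    quay.io/libpod/my-app:latest

# Generate a systemd unit for the container and enable it (rootless example).
podman generate systemd --new --files --name my-app
mv container-my-app.service ~/.config/systemd/user/
systemctl --user daemon-reload
systemctl --user enable --now container-my-app.service

# Later: pull newer images, restart the units, and roll back on failure.
podman auto-update
```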
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Diffstat (limited to 'test/system')
-rw-r--r-- | test/system/255-auto-update.bats | 117 |
1 file changed, 112 insertions(+), 5 deletions(-)
```diff
diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats
index 6fb40f41e..4e242e1f1 100644
--- a/test/system/255-auto-update.bats
+++ b/test/system/255-auto-update.bats
@@ -26,9 +26,17 @@ function teardown() {
     done < $SNAME_FILE
     rm -f $SNAME_FILE
-    run_podman ? rmi quay.io/libpod/alpine:latest
-    run_podman ? rmi quay.io/libpod/busybox:latest
-    run_podman ? rmi quay.io/libpod/localtest:latest
+    run_podman ? rmi -f \
+        quay.io/libpod/alpine:latest \
+        quay.io/libpod/busybox:latest \
+        quay.io/libpod/localtest:latest \
+        quay.io/libpod/autoupdatebroken:latest \
+        quay.io/libpod/test:latest \
+        quay.io/libpod/fedora:31
+
+    # The rollback tests may leave some dangling images behind, so let's prune
+    # them to leave a clean state.
+    run_podman ? image prune -f

     basic_teardown
 }

@@ -43,18 +51,30 @@ function teardown() {
 function generate_service() {
     local target_img_basename=$1
     local autoupdate=$2
+    local command=$3
+    local extraArgs=$4
+    local noTag=$5
+
+    # Unless specified, set a default command.
+    if [[ -z "$command" ]]; then
+        command="top -d 120"
+    fi

     # Container name. Include the autoupdate type, to make debugging easier.
     # IMPORTANT: variable 'cname' is passed (out of scope) up to caller!
     cname=c_${autoupdate//\'/}_$(random_string)
     target_img="quay.io/libpod/$target_img_basename:latest"
-    run_podman tag $IMAGE $target_img
+
+    if [[ -z "$noTag" ]]; then
+        run_podman tag $IMAGE $target_img
+    fi
+
     if [[ -n "$autoupdate" ]]; then
         label="--label io.containers.autoupdate=$autoupdate"
     else
         label=""
     fi
-    run_podman run -d --name $cname $label $target_img top -d 120
+
+    run_podman create $extraArgs --name $cname $label $target_img $command
     (cd $UNIT_DIR; run_podman generate systemd --new --files --name $cname)
     echo "container-$cname" >> $SNAME_FILE
@@ -128,6 +148,38 @@ function _confirm_update() {
     _confirm_update $cname $ori_image
 }

+@test "podman auto-update - label io.containers.autoupdate=image with rollback" {
+    # Note: the autoupdatebroken image is empty on purpose so it cannot be
+    # executed and force a rollback. The rollback test for the local policy
+    # is exercising the case where the container doesn't send a ready message.
+    image=quay.io/libpod/autoupdatebroken
+
+    run_podman tag $IMAGE $image
+    generate_service autoupdatebroken image
+
+    _wait_service_ready container-$cname.service
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*container-$cname.service,$image:latest,pending,registry.*" "Image update is pending."
+
+    run_podman container inspect --format "{{.Image}}" $cname
+    oldID="$output"
+
+    run_podman inspect --format "{{.ID}}" $cname
+    containerID="$output"
+
+    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" "Trying to pull.*" "Image is updated."
+    is "$output" ".*container-$cname.service,$image:latest,rolled back,registry.*" "Image has been rolled back."
+
+    run_podman container inspect --format "{{.Image}}" $cname
+    is "$output" "$oldID" "container rolled back to previous image"
+
+    run_podman container inspect --format "{{.ID}}" $cname
+    if [[ $output == $containerID ]]; then
+        die "container has not been restarted during rollback (previous id: $containerID, current id: $output)"
+    fi
+}
+
 @test "podman auto-update - label io.containers.autoupdate=disabled" {
     generate_service alpine disabled
@@ -168,6 +220,61 @@ function _confirm_update() {
     _confirm_update $cname $ori_image
 }

+@test "podman auto-update - label io.containers.autoupdate=local with rollback" {
+    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
+    # assume that we work only with crun, nothing else.
+    # [copied from 260-sdnotify.bats]
+    runtime=$(podman_runtime)
+    if [[ "$runtime" != "crun" ]]; then
+        skip "this test only works with crun, not $runtime"
+    fi
+
+    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
+    cat >$dockerfile1 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
+trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
+>> /runme
+RUN chmod +x /runme
+EOF
+
+    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
+    cat >$dockerfile2 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+exit 1" >> /runme
+RUN chmod +x /runme
+EOF
+    image=test
+
+    # Generate a healthy image that will run correctly.
+    run_podman build -t quay.io/libpod/$image -f $dockerfile1
+    podman image inspect --format "{{.ID}}" $image
+    oldID="$output"
+
+    generate_service $image local /runme --sdnotify=container noTag
+    _wait_service_ready container-$cname.service
+
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,false,local.*" "No update available"
+
+    # Generate an unhealthy image that will fail.
+    run_podman build -t quay.io/libpod/$image -f $dockerfile2
+    podman image inspect --format "{{.ID}}" $image
+    newID="$output"
+
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,pending,local.*" "Image updated is pending"
+
+    # Note: we rollback automatically by default.
+    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,rolled back,local.*" "Rolled back to old image"
+
+    # Make sure that new container is not using the new image ID anymore.
+    _confirm_update $cname $newID
+}
+
 @test "podman auto-update with multiple services" {
     # Preserve original image ID, to confirm that it changes (or not)
     run_podman inspect --format "{{.Id}}" $IMAGE
```
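The new cases run like the rest of podman's system tests; a typical local invocation (assuming bats is installed and the binary under test is selected via the PODMAN environment variable honored by the test helpers) would be:

```sh
# Run only the auto-update system tests against a locally built podman.
PODMAN=./bin/podman bats test/system/255-auto-update.bats
```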