author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>    2021-04-29 13:37:50 -0400
committer GitHub <noreply@github.com>    2021-04-29 13:37:50 -0400
commit    4d2ba323f2e67fbfd6150582196c6e0229c70a8e (patch)
tree      cb7edff74ad43f4f4876952e945d9bf4744a3712
parent    c01b1cbfcd205678af820d43b1966025e9c463e7 (diff)
parent    caf19efdabbabb49ef48b91fa5db3fc98b2fdf1b (diff)
Merge pull request #10063 from ParkerVR/autoupdate-local
Autoupdate Local
-rw-r--r--  docs/source/markdown/podman-auto-update.1.md |  55
-rw-r--r--  pkg/autoupdate/autoupdate.go                 | 136
-rw-r--r--  test/system/250-systemd.bats                 |  70
3 files changed, 204 insertions, 57 deletions
diff --git a/docs/source/markdown/podman-auto-update.1.md b/docs/source/markdown/podman-auto-update.1.md
index f82a1375c..f298d6bf6 100644
--- a/docs/source/markdown/podman-auto-update.1.md
+++ b/docs/source/markdown/podman-auto-update.1.md
@@ -9,21 +9,25 @@ podman-auto-update - Auto update containers according to their auto-update polic
## DESCRIPTION
`podman auto-update` looks up containers with a specified "io.containers.autoupdate" label (i.e., the auto-update policy).
-If the label is present and set to "image", Podman reaches out to the corresponding registry to check if the image has been updated.
+If the label is present and set to "registry", Podman reaches out to the corresponding registry to check if the image has been updated.
+The label "image" is an alternative to "registry" maintained for backwards compatibility.
An image is considered updated if the digest in the local storage is different than the one of the remote image.
If an image must be updated, Podman pulls it down and restarts the systemd unit executing the container.
+The registry policy requires a fully-qualified image reference (e.g., quay.io/podman/stable:latest) to be used to create the container.
+This enforcement is necessary to know which image to actually check and pull.
+If an image ID was used, Podman would not know which image to check/pull anymore.
+
+Alternatively, if the autoupdate label is set to "local", Podman will compare the image a container is using to the image with its raw name in local storage.
+If an image is updated locally, Podman simply restarts the systemd unit executing the container.
+
If "io.containers.autoupdate.authfile" label is present, Podman reaches out to corresponding authfile when pulling images.
At container-creation time, Podman looks up the "PODMAN_SYSTEMD_UNIT" environment variables and stores it verbatim in the container's label.
This variable is now set by all systemd units generated by `podman-generate-systemd` and is set to `%n` (i.e., the name of systemd unit starting the container).
This data is then being used in the auto-update sequence to instruct systemd (via DBUS) to restart the unit and hence to restart the container.
-Note that `podman auto-update` relies on systemd and requires a fully-qualified image reference (e.g., quay.io/podman/stable:latest) to be used to create the container.
-This enforcement is necessary to know which image to actually check and pull.
-If an image ID was used, Podman would not know which image to check/pull anymore.
-
-Moreover, the systemd units are expected to be generated with `podman-generate-systemd --new`, or similar units that create new containers in order to run the updated images.
+Note that `podman auto-update` relies on systemd. The systemd units are expected to be generated with `podman-generate-systemd --new`, or similar units that create new containers in order to run the updated images.
Systemd units that start and stop a container cannot run a new image.
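
Both the policy and the recorded unit name described above end up as plain container labels, so they can be checked directly. A minimal sketch, assuming a container named `mycontainer` (the name is illustrative, not part of this patch):

```
# Print the auto-update policy of a container ("registry", "local", or empty)
$ podman inspect --format '{{ index .Config.Labels "io.containers.autoupdate" }}' mycontainer

# Print the systemd unit stored from the PODMAN_SYSTEMD_UNIT environment variable
$ podman inspect --format '{{ index .Config.Labels "PODMAN_SYSTEMD_UNIT" }}' mycontainer
```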
@@ -44,9 +48,11 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
## EXAMPLES
+Autoupdate with registry policy
+
```
# Start a container
-$ podman run --label "io.containers.autoupdate=image" \
+$ podman run --label "io.containers.autoupdate=registry" \
--label "io.containers.autoupdate.authfile=/some/authfile.json" \
-d busybox:latest top
bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
@@ -70,5 +76,40 @@ $ podman auto-update
container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
```
+Autoupdate with local policy
+
+```
+# Start a container
+$ podman run --label "io.containers.autoupdate=local" \
+ -d busybox:latest top
+be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
+
+# Generate a systemd unit for this container
+$ podman generate systemd --new --files be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
+/home/user/containers/libpod/container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
+
+# Load the new systemd unit and start it
+$ mv ./container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service ~/.config/systemd/user
+$ systemctl --user daemon-reload
+
+# If the previously created containers or pods are using shared resources, such as ports, make sure to remove them before starting the generated systemd units.
+$ podman stop be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
+$ podman rm be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
+
+$ systemctl --user start container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
+
+# Get the name of the container
+$ podman ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+01f5c8113e84 docker.io/library/busybox:latest top 2 seconds ago Up 3 seconds ago inspiring_galileo
+
+# Modify the image
+$ podman commit --change CMD=/bin/bash inspiring_galileo busybox:latest
+
+# Auto-update the container
+$ podman auto-update
+container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
+```
+
## SEE ALSO
podman(1), podman-generate-systemd(1), podman-run(1), systemd.unit(5)
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index e271b9466..e4b43109f 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -33,17 +33,24 @@ type Policy string
const (
// PolicyDefault is the default policy denoting no auto updates.
PolicyDefault Policy = "disabled"
- // PolicyNewImage is the policy to update as soon as there's a new image found.
- PolicyNewImage = "image"
+ // PolicyRegistryImage is the policy to update as soon as there's a new image found.
+ PolicyRegistryImage = "registry"
+ // PolicyLocalImage is the policy to run auto-update based on a local image
+ PolicyLocalImage = "local"
)
// Map for easy lookups of supported policies.
var supportedPolicies = map[string]Policy{
"": PolicyDefault,
"disabled": PolicyDefault,
- "image": PolicyNewImage,
+ "image": PolicyRegistryImage,
+ "registry": PolicyRegistryImage,
+ "local": PolicyLocalImage,
}
+// policyMapper is used for tying a container to its autoupdate policy
+type policyMapper map[Policy][]*libpod.Container
+
// LookupPolicy looks up the corresponding Policy for the specified
// string. If none is found, an errors is returned including the list of
// supported policies.
@@ -99,11 +106,17 @@ func ValidateImageReference(imageName string) error {
}
// AutoUpdate looks up containers with a specified auto-update policy and acts
-// accordingly. If the policy is set to PolicyNewImage, it checks if the image
+// accordingly.
+//
+// If the policy is set to PolicyRegistryImage, it checks if the image
// on the remote registry is different than the local one. If the image digests
// differ, it pulls the remote image and restarts the systemd unit running the
// container.
//
+// If the policy is set to PolicyLocalImage, it checks if the image
+// of a running container is different than the local one. If the image digests
+// differ, it restarts the systemd unit with the new image.
+//
// It returns a slice of successfully restarted systemd units and a slice of
// errors encountered during auto update.
func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
@@ -134,7 +147,7 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
// Update images.
containersToRestart := []*libpod.Container{}
updatedRawImages := make(map[string]bool)
- for imageID, containers := range containerMap {
+ for imageID, policyMapper := range containerMap {
image, exists := imageMap[imageID]
if !exists {
errs = append(errs, errors.Errorf("container image ID %q not found in local storage", imageID))
@@ -143,34 +156,50 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
// Now we have to check if the image of any containers must be updated.
// Note that the image ID is NOT enough for this check as a given image
// may have multiple tags.
- for i, ctr := range containers {
- rawImageName := ctr.RawImageName()
+ for _, registryCtr := range policyMapper[PolicyRegistryImage] {
+ cid := registryCtr.ID()
+ rawImageName := registryCtr.RawImageName()
if rawImageName == "" {
- errs = append(errs, errors.Errorf("error auto-updating container %q: raw-image name is empty", ctr.ID()))
- }
- labels := ctr.Labels()
- authFilePath, exists := labels[AuthfileLabel]
- if exists {
- options.Authfile = authFilePath
+ errs = append(errs, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid))
}
- needsUpdate, err := newerImageAvailable(runtime, image, rawImageName, options)
+ readAuthenticationPath(registryCtr, options)
+ needsUpdate, err := newerRemoteImageAvailable(runtime, image, rawImageName, options)
if err != nil {
- errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: image check for %q failed", ctr.ID(), rawImageName))
+ errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName))
continue
}
- if !needsUpdate {
+
+ if needsUpdate {
+ logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
+ if _, updated := updatedRawImages[rawImageName]; !updated {
+ _, err = updateImage(runtime, rawImageName, options)
+ if err != nil {
+ errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName))
+ continue
+ }
+ updatedRawImages[rawImageName] = true
+ }
+ containersToRestart = append(containersToRestart, registryCtr)
+ }
+ }
+
+ for _, localCtr := range policyMapper[PolicyLocalImage] {
+ cid := localCtr.ID()
+ rawImageName := localCtr.RawImageName()
+ if rawImageName == "" {
+ errs = append(errs, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid))
+ }
+ // This avoids restarting containers unnecessarily.
+ needsUpdate, err := newerLocalImageAvailable(image, rawImageName)
+ if err != nil {
+ errs = append(errs, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName))
continue
}
- logrus.Infof("Auto-updating container %q using image %q", ctr.ID(), rawImageName)
- if _, updated := updatedRawImages[rawImageName]; !updated {
- _, err = updateImage(runtime, rawImageName, options)
- if err != nil {
- errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: image update for %q failed", ctr.ID(), rawImageName))
- continue
- }
- updatedRawImages[rawImageName] = true
+
+ if needsUpdate {
+ logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
+ containersToRestart = append(containersToRestart, localCtr)
}
- containersToRestart = append(containersToRestart, containers[i])
}
}
@@ -198,15 +227,15 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
// imageContainersMap generates a map[image ID] -> [containers using the image]
// of all containers with a valid auto-update policy.
-func imageContainersMap(runtime *libpod.Runtime) (map[string][]*libpod.Container, []error) {
+func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []error) {
allContainers, err := runtime.GetAllContainers()
if err != nil {
return nil, []error{err}
}
errors := []error{}
- imageMap := make(map[string][]*libpod.Container)
- for i, ctr := range allContainers {
+ containerMap := make(map[string]policyMapper)
+ for _, ctr := range allContainers {
state, err := ctr.State()
if err != nil {
errors = append(errors, err)
@@ -230,22 +259,36 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string][]*libpod.Container
continue
}
- // Skip non-image labels (could be explicitly disabled).
- if policy != PolicyNewImage {
+ // Skip labels not related to autoupdate
+ if policy == PolicyDefault {
continue
+ } else {
+ id, _ := ctr.Image()
+ policyMap, exists := containerMap[id]
+ if !exists {
+ policyMap = make(map[Policy][]*libpod.Container)
+ }
+ policyMap[policy] = append(policyMap[policy], ctr)
+ containerMap[id] = policyMap
+ // Now we know that `ctr` is configured for auto updates.
}
-
- // Now we know that `ctr` is configured for auto updates.
- id, _ := ctr.Image()
- imageMap[id] = append(imageMap[id], allContainers[i])
}
- return imageMap, errors
+ return containerMap, errors
}
-// newerImageAvailable returns true if there corresponding image on the remote
+// readAuthenticationPath reads a container's labels and copies the authentication path into options
+func readAuthenticationPath(ctr *libpod.Container, options Options) {
+ labels := ctr.Labels()
+ authFilePath, exists := labels[AuthfileLabel]
+ if exists {
+ options.Authfile = authFilePath
+ }
+}
+
+// newerRemoteImageAvailable returns true if the corresponding image on the remote
// registry is newer.
-func newerImageAvailable(runtime *libpod.Runtime, img *image.Image, origName string, options Options) (bool, error) {
+func newerRemoteImageAvailable(runtime *libpod.Runtime, img *image.Image, origName string, options Options) (bool, error) {
remoteRef, err := docker.ParseReference("//" + origName)
if err != nil {
return false, err
@@ -282,6 +325,25 @@ func newerImageAvailable(runtime *libpod.Runtime, img *image.Image, origName str
return img.Digest().String() != remoteDigest.String(), nil
}
+// newerLocalImageAvailable returns true if the container and local image have different digests
+func newerLocalImageAvailable(img *image.Image, rawImageName string) (bool, error) {
+ rt, err := libpod.NewRuntime(context.TODO())
+ if err != nil {
+ return false, err
+ }
+
+ localImg, err := rt.ImageRuntime().NewFromLocal(rawImageName)
+ if err != nil {
+ return false, err
+ }
+
+ localDigest := localImg.Digest().String()
+
+ ctrDigest := img.Digest().String()
+
+ return localDigest != ctrDigest, nil
+}
+
// updateImage pulls the specified image.
func updateImage(runtime *libpod.Runtime, name string, options Options) (*image.Image, error) {
sys := runtime.SystemContext()
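
The check implemented by `newerLocalImageAvailable` compares the digest of the image the container was created from with the digest of whatever currently sits in local storage under the container's raw image name. That comparison can be approximated from the shell; a rough sketch, assuming a container `mycontainer` created from `busybox:latest` (illustrative names, not taken from the patch):

```
# Digest of the image the container was created from
$ podman image inspect --format '{{.Digest}}' $(podman inspect --format '{{.Image}}' mycontainer)

# Digest currently stored in local storage under the raw image name
$ podman image inspect --format '{{.Digest}}' busybox:latest

# If the two digests differ, the "local" policy restarts the container's systemd unit.
```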
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index ac3ae2f98..b42769409 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -27,23 +27,23 @@ function teardown() {
run '?' $SYSTEMCTL stop "$SERVICE_NAME"
rm -f "$UNIT_FILE"
$SYSTEMCTL daemon-reload
+ run_podman rmi -a
+
basic_teardown
}
-# This test can fail in dev. environment because of SELinux.
-# quick fix: chcon -t container_runtime_exec_t ./bin/podman
-@test "podman generate - systemd - basic" {
+# Helper to setup xdg runtime for rootless
+function xdg_rootless() {
# podman initializes this if unset, but systemctl doesn't
if is_rootless; then
if [ -z "$XDG_RUNTIME_DIR" ]; then
export XDG_RUNTIME_DIR=/run/user/$(id -u)
fi
fi
+}
- cname=$(random_string)
- # See #7407 for --pull=always.
- run_podman create --pull=always --name $cname --label "io.containers.autoupdate=image" $IMAGE top
-
+# Helper to start a systemd service running a container
+function service_setup() {
run_podman generate systemd --new $cname
echo "$output" > "$UNIT_FILE"
run_podman rm $cname
@@ -59,6 +59,30 @@ function teardown() {
if [ $status -ne 0 ]; then
die "Non-zero status of systemd unit $SERVICE_NAME, output: $output"
fi
+}
+
+# Helper to stop a systemd service running a container
+function service_cleanup() {
+ run $SYSTEMCTL stop "$SERVICE_NAME"
+ if [ $status -ne 0 ]; then
+ die "Error stopping systemd unit $SERVICE_NAME, output: $output"
+ fi
+
+ rm -f "$UNIT_FILE"
+ $SYSTEMCTL daemon-reload
+}
+
+# These tests can fail in dev. environment because of SELinux.
+# quick fix: chcon -t container_runtime_exec_t ./bin/podman
+@test "podman generate - systemd - basic" {
+ xdg_rootless
+
+ cname=$(random_string)
+ # See #7407 for --pull=always.
+ run_podman create --pull=always --name $cname --label "io.containers.autoupdate=registry" $IMAGE top
+
+ # Start systemd service to run this container
+ service_setup
# Give container time to start; make sure output looks top-like
sleep 2
@@ -72,13 +96,33 @@ function teardown() {
run_podman auto-update
# All good. Stop service, clean up.
- run $SYSTEMCTL stop "$SERVICE_NAME"
- if [ $status -ne 0 ]; then
- die "Error stopping systemd unit $SERVICE_NAME, output: $output"
- fi
+ service_cleanup
+}
- rm -f "$UNIT_FILE"
- $SYSTEMCTL daemon-reload
+@test "podman autoupdate local" {
+ xdg_rootless
+
+ cname=$(random_string)
+ run_podman create --name $cname --label "io.containers.autoupdate=local" $IMAGE top
+
+ # Start systemd service to run this container
+ service_setup
+
+ # Give container time to start; make sure output looks top-like
+ sleep 2
+ run_podman logs $cname
+ is "$output" ".*Load average:.*" "running container 'top'-like output"
+
+ # Save the container id before updating
+ run_podman ps --format '{{.ID}}'
+
+ # Run auto-update and check that it restarted the container
+ run_podman commit --change "CMD=/bin/bash" $cname $IMAGE
+ run_podman auto-update
+    is "$output" "$SERVICE_NAME" "autoupdate local restarted container"
+
+ # All good. Stop service, clean up.
+ service_cleanup
}
# vim: filetype=sh
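
The new system test is plain bats. Assuming the usual podman system-test harness (the `PODMAN` override and the `make podman` target are assumptions, not shown in this patch), it can be exercised against a local build roughly like this:

```
$ make podman
# Dev-environment SELinux workaround noted in the test comment above
$ chcon -t container_runtime_exec_t ./bin/podman
$ PODMAN=./bin/podman bats test/system/250-systemd.bats
```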