author    David Gibson <david@gibson.dropbear.id.au>  2022-01-19 13:20:53 +1100
committer David Gibson <david@gibson.dropbear.id.au>  2022-02-18 12:35:25 +1100
commit    db4d15e852c574a2f2f4039cfef4814982776544
tree      edbac426dffcba771617b21bc390fb61675cba31
parent    d615ab81f9f8bbed5f335683f4bbcdda0e789ee1
Propagate $CONTAINERS_CONF to conmon
The CONTAINERS_CONF environment variable can be used to override the configuration file, which is useful for testing. However, at the moment this variable is not propagated to conmon. In particular, that means conmon can't propagate it back to podman when invoking its --exit-command. The mismatch in configuration between the starting and cleaning-up podman instances can cause a variety of errors.

This patch also adds two related test cases. One checks explicitly that the correct CONTAINERS_CONF value appears in conmon's environment. The other checks for a specific possible impact of this bug: if we use a nonstandard name for the runtime (even if its path is just a regular crun), then the podman container cleanup invoked at container exit will fail. As a result, a container started with -d --rm won't be correctly removed once it completes.

Fixes #12917

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
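For reference, the second failure mode is straightforward to reproduce by hand, along the same lines as the new test below. This is a minimal sketch, not part of the patch; the image name and temp-file path are arbitrary placeholders:

    # Point a throwaway config at the usual OCI runtime, but under a
    # nonstandard name.
    conf=$(mktemp)
    ocipath=$(podman info --format '{{ .Host.OCIRuntime.Path }}')
    printf '[engine]\nruntime = "nonstandard_runtime_name"\n[engine.runtimes]\nnonstandard_runtime_name = ["%s"]\n' "$ocipath" > "$conf"

    cid=$(CONTAINERS_CONF=$conf podman run -d --rm alpine true)
    CONTAINERS_CONF=$conf podman wait "$cid" 2>/dev/null || true
    # Without this fix, the exit-time cleanup fails and the exited
    # container lingers despite --rm:
    CONTAINERS_CONF=$conf podman ps -a --filter "id=$cid"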
-rw-r--r--  libpod/oci_conmon_linux.go    4
-rw-r--r--  test/system/800-config.bats  80
2 files changed, 84 insertions(+), 0 deletions(-)
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 268a301fb..a328f7621 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1318,6 +1318,10 @@ func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string)
env = append(env, e)
}
}
+ conf, ok := os.LookupEnv("CONTAINERS_CONF")
+ if ok {
+ env = append(env, fmt.Sprintf("CONTAINERS_CONF=%s", conf))
+ }
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
diff --git a/test/system/800-config.bats b/test/system/800-config.bats
new file mode 100644
index 000000000..f5b4e9570
--- /dev/null
+++ b/test/system/800-config.bats
@@ -0,0 +1,80 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# Test specific configuration options and overrides
+#
+
+load helpers
+
+@test "podman CONTAINERS_CONF - CONTAINERS_CONF in conmon" {
+ skip_if_remote "can't check conmon environment over remote"
+
+ # Get the normal runtime for this host
+ run_podman info --format '{{ .Host.OCIRuntime.Name }}'
+ runtime="$output"
+ run_podman info --format "{{ .Host.OCIRuntime.Path }}"
+ ocipath="$output"
+
+ # Make an innocuous containers.conf in a non-standard location
+ conf_tmp="$PODMAN_TMPDIR/containers.conf"
+ cat >$conf_tmp <<EOF
+[engine]
+runtime="$runtime"
+[engine.runtimes]
+$runtime = ["$ocipath"]
+EOF
+ CONTAINERS_CONF="$conf_tmp" run_podman run -d $IMAGE sleep infinity
+ cid="$output"
+
+ CONTAINERS_CONF="$conf_tmp" run_podman inspect "$cid" --format "{{ .State.ConmonPid }}"
+ conmon="$output"
+
+ output="$(tr '\0' '\n' < /proc/$conmon/environ | grep '^CONTAINERS_CONF=')"
+ is "$output" "CONTAINERS_CONF=$conf_tmp"
+
+ # Clean up. sleep runs as PID 1 in the container and so ignores
+ # SIGTERM by default; we need the "-f -t 0" to force a SIGKILL.
+ CONTAINERS_CONF="$conf_tmp" run_podman rm -f -t 0 "$cid"
+}
+
+@test "podman CONTAINERS_CONF - override runtime name" {
+ skip_if_remote "Can't set CONTAINERS_CONF over remote"
+
+ # Get the path of the normal runtime
+ run_podman info --format "{{ .Host.OCIRuntime.Path }}"
+ ocipath="$output"
+
+ export conf_tmp="$PODMAN_TMPDIR/nonstandard_runtime_name.conf"
+ cat > $conf_tmp <<EOF
+[engine]
+runtime = "nonstandard_runtime_name"
+[engine.runtimes]
+nonstandard_runtime_name = ["$ocipath"]
+EOF
+
+ CONTAINERS_CONF="$conf_tmp" run_podman run -d --rm $IMAGE true
+ cid="$output"
+
+ # We need to wait for the container to finish before we can check
+ # whether it was cleaned up properly. But in the common case where
+ # the container completes quickly and the cleanup *did* happen
+ # properly, the container is already gone. So, we need to ignore
+ # "no such container" errors from podman wait.
+ CONTAINERS_CONF="$conf_tmp" run_podman '?' wait "$cid"
+ if [[ $status != 0 ]]; then
+ is "$output" "Error:.*no such container" "unexpected error from podman wait"
+ fi
+
+ # The --rm option means the container should no longer exist.
+ # However https://github.com/containers/podman/issues/12917 meant
+ # that the container cleanup triggered by conmon's --exit-cmd
+ # could fail, leaving the container in place.
+ #
+ # We verify that the container is indeed gone by checking that
+ # podman rm *fails* here; this also has the side effect of
+ # cleaning up in case the test does fail.
+ CONTAINERS_CONF="$conf_tmp" run_podman 1 rm "$cid"
+ is "$output" "Error:.*no such container"
+}
+
+# vim: filetype=sh
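To run just this new file against a local build, something like the following should work (a sketch; it assumes bats is installed, and that the PODMAN variable honored by the system-test helpers points at the binary under test):

    PODMAN=./bin/podman bats test/system/800-config.bats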