#!/usr/bin/env bats -*- bats -*-
#
# Tests for systemd sdnotify
#
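#
# podman --sdnotify supports three modes, all exercised below:
#   ignore    - NOTIFY_SOCKET is not passed into the container and nothing is
#               expected on the host socket
#   conmon    - conmon sends MAINPID and READY=1 once the container starts
#   container - the socket is proxied into the container (as
#               /run/notify/notify.sock) and the container itself signals
#               readiness, e.g. via systemd-notify --ready
#
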
load helpers
# Shared throughout this module: PID of socat process, and path to its log
_SOCAT_PID=
_SOCAT_LOG=

function setup() {
    skip_if_remote "systemd tests are meaningless over remote"

    # Skip if systemd is not running
    systemctl list-units &>/dev/null || skip "systemd not available"

    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
    # assume that we work only with crun, nothing else.
    runtime=$(podman_runtime)
    if [[ "$runtime" != "crun" ]]; then
        skip "this test only works with crun, not $runtime"
    fi

    basic_setup
}

function teardown() {
    unset NOTIFY_SOCKET
    _stop_socat

    basic_teardown
}

###############################################################################
# BEGIN helpers
# Run socat process on a socket, logging to well-known path. Each received
# packet is logged with a newline appended, for ease of parsing the log file.
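# Example (illustrative values) of what the log may contain after a run with
# --sdnotify=conmon, one received datagram per line:
#   MAINPID=12345
#   READY=1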
function _start_socat() {
    _SOCAT_LOG="$PODMAN_TMPDIR/socat.log"

    rm -f $_SOCAT_LOG
    # Execute in subshell so we can close fd3 (which BATS uses).
    # This is a superstitious ritual to try to avoid leaving processes behind,
    # and thus prevent CI hangs.
    (exec socat unix-recvfrom:"$NOTIFY_SOCKET",fork \
          system:"(cat;echo) >> $_SOCAT_LOG" 3>&-) &
    _SOCAT_PID=$!
}

# Stop the socat background process and clean up logs
function _stop_socat() {
    if [[ -n "$_SOCAT_PID" ]]; then
        # Kill all child processes, then the process itself.
        # This is a superstitious incantation to avoid leaving processes behind.
        # The '|| true' is because only f35 leaves behind socat processes;
        # f33 (and perhaps others?) behave nicely. ARGH!
        pkill -P $_SOCAT_PID || true
        kill $_SOCAT_PID
    fi
    _SOCAT_PID=

    if [[ -n "$_SOCAT_LOG" ]]; then
        rm -f $_SOCAT_LOG
    fi
    _SOCAT_LOG=
}

# Check that MAINPID=xxxxx points to a running conmon process
function _assert_mainpid_is_conmon() {
    local mainpid=$(expr "$1" : ".*MAINPID=\([0-9]\+\)")
    test -n "$mainpid" || die "Could not parse '$1' as 'MAINPID=nnnn'"

    test -d /proc/$mainpid || die "sdnotify MAINPID=$mainpid - but /proc/$mainpid does not exist"

    # e.g. /proc/12345/exe -> /usr/bin/conmon
    local mainpid_bin=$(readlink /proc/$mainpid/exe)
    is "$mainpid_bin" ".*/conmon" "sdnotify MAINPID=$mainpid is conmon process"
}

# END helpers
###############################################################################
# BEGIN tests themselves
@test "sdnotify : ignore" {
export NOTIFY_SOCKET=$PODMAN_TMPDIR/ignore.sock
_start_socat
run_podman create --rm --sdnotify=ignore $IMAGE printenv NOTIFY_SOCKET
cid="$output"
run_podman container inspect $cid --format "{{.Config.SdNotifyMode}} {{.Config.SdNotifySocket}}"
is "$output" "ignore " "NOTIFY_SOCKET is not set with 'ignore' mode"
run_podman 1 start --attach $cid
is "$output" "" "\$NOTIFY_SOCKET in container"
is "$(< $_SOCAT_LOG)" "" "nothing received on socket"
_stop_socat
}
@test "sdnotify : conmon" {
export NOTIFY_SOCKET=$PODMAN_TMPDIR/conmon.sock
_start_socat
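    # The container prints NOTIFY_SOCKET (expected to be empty in conmon mode),
    # echoes READY so that wait_for_ready below can spot it in the logs, then
    # idles until /stop is created.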
    run_podman run -d --name sdnotify_conmon_c \
               --sdnotify=conmon \
               $IMAGE \
               sh -c 'printenv NOTIFY_SOCKET;echo READY;while ! test -f /stop;do sleep 0.1;done'
    cid="$output"
    wait_for_ready $cid

    run_podman container inspect $cid --format "{{.Config.SdNotifyMode}} {{.Config.SdNotifySocket}}"
    is "$output" "conmon $NOTIFY_SOCKET"

    run_podman container inspect sdnotify_conmon_c --format "{{.State.ConmonPid}}"
    mainPID="$output"

    run_podman logs sdnotify_conmon_c
    is "$output" "READY" "\$NOTIFY_SOCKET in container"

    # The 'echo's help us debug failed runs
    wait_for_file $_SOCAT_LOG
    run cat $_SOCAT_LOG
    echo "socat log:"
    echo "$output"

    is "$output" "MAINPID=$mainPID
READY=1" "sdnotify sent MAINPID and READY"

    _assert_mainpid_is_conmon "$output"

    # Done. Stop container, clean up.
    run_podman exec $cid touch /stop
    run_podman wait $cid
    run_podman rm $cid
    _stop_socat
}

# These tests can fail in a development environment because of SELinux.
# Quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "sdnotify : container" {
    # Sigh... we need to pull a humongous image because it has systemd-notify.
    # (IMPORTANT: fedora:32 and above silently removed systemd-notify; this
    # caused CI to hang. That's why we explicitly require fedora:31)
    # FIXME: is there a smaller image we could use?
    local _FEDORA="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/fedora:31"
    # Pull that image. Retry in case of flakes.
    run_podman pull $_FEDORA || \
        run_podman pull $_FEDORA || \
        run_podman pull $_FEDORA

    export NOTIFY_SOCKET=$PODMAN_TMPDIR/container.sock
    _start_socat

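    # With --sdnotify=container, readiness is the container's own job: it
    # prints the proxied NOTIFY_SOCKET path, echoes READY for wait_for_ready,
    # waits for /stop, and only then sends READY=1 via systemd-notify.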
    run_podman run -d --sdnotify=container $_FEDORA \
               sh -c 'printenv NOTIFY_SOCKET; echo READY; while ! test -f /stop;do sleep 0.1;done;systemd-notify --ready'
    cid="$output"
    wait_for_ready $cid

    run_podman container inspect $cid --format "{{.Config.SdNotifyMode}} {{.Config.SdNotifySocket}}"
    is "$output" "container $NOTIFY_SOCKET"

    run_podman logs $cid
    is "${lines[0]}" "/run/notify/notify.sock" "NOTIFY_SOCKET is passed to container"

    run_podman container inspect $cid --format "{{.State.ConmonPid}}"
    mainPID="$output"

    # At this point the container has not sent READY=1 yet, so the only
    # message on the socket should be the MAINPID sent by conmon.
    run cat $_SOCAT_LOG
    # The 'echo's help us debug failed runs
    echo "socat log:"
    echo "$output"
    is "$output" "MAINPID=$mainPID" "Container is not ready yet, so we only know the main PID"

    # Done. Stop container, clean up.
    run_podman exec $cid touch /stop
    run_podman wait $cid
    wait_for_file $_SOCAT_LOG
    run cat $_SOCAT_LOG
    echo "socat log:"
    echo "$output"
    is "$output" "MAINPID=$mainPID
READY=1"

    run_podman rm $cid
    run_podman rmi $_FEDORA
    _stop_socat
}
@test "sdnotify : play kube - no policies" {
# Create the YAMl file
yaml_source="$PODMAN_TMPDIR/test.yaml"
cat >$yaml_source <<EOF
apiVersion: v1
kind: Pod
metadata:
labels:
app: test
name: test_pod
spec:
containers:
- command:
- top
image: $IMAGE
name: test
resources: {}
EOF

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"
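    # (illustrative example: a YAML hash beginning "0123456789ab" would give a
    # service container named "0123456789ab-service")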

    export NOTIFY_SOCKET=$PODMAN_TMPDIR/conmon.sock
    _start_socat

    run_podman play kube --service-container=true $yaml_source

    # Make sure the containers have the correct policy.
    run_podman container inspect test_pod-test $service_container --format "{{.Config.SdNotifyMode}}"
    is "$output" "ignore
ignore"

    run_podman container inspect $service_container --format "{{.State.ConmonPid}}"
    mainPID="$output"
    wait_for_file $_SOCAT_LOG
    # The 'echo's help us debug failed runs
    run cat $_SOCAT_LOG
    echo "socat log:"
    echo "$output"

    is "$output" "MAINPID=$mainPID
READY=1" "sdnotify sent MAINPID and READY"

    _stop_socat

    # Clean up pod and pause image
    run_podman play kube --down $PODMAN_TMPDIR/test.yaml
    run_podman rmi $(pause_image)
}
@test "sdnotify : play kube - with policies" {
# Sigh... we need to pull a humongous image because it has systemd-notify.
# (IMPORTANT: fedora:32 and above silently removed systemd-notify; this
# caused CI to hang. That's why we explicitly require fedora:31)
# FIXME: is there a smaller image we could use?
local _FEDORA="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/fedora:31"
# Pull that image. Retry in case of flakes.
run_podman pull $_FEDORA || \
run_podman pull $_FEDORA || \
run_podman pull $_FEDORA

    # Create the YAML file
    yaml_source="$PODMAN_TMPDIR/test.yaml"
    cat >$yaml_source <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
  annotations:
    io.containers.sdnotify: "container"
    io.containers.sdnotify/b: "conmon"
spec:
  containers:
  - command:
    - /bin/sh
    - -c
    - 'printenv NOTIFY_SOCKET; echo READY; while ! test -f /stop;do sleep 0.1;done;systemd-notify --ready'
    image: $_FEDORA
    name: a
  - command:
    - /bin/sh
    - -c
    - 'echo READY; top'
    image: $IMAGE
    name: b
EOF
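
    # Container "a" inherits the pod-level annotation (sdnotify=container) and
    # must signal readiness itself; the ".../b" annotation overrides that for
    # container "b" (sdnotify=conmon). The service container is expected to run
    # with sdnotify=ignore, as asserted below.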

    container_a="test_pod-a"
    container_b="test_pod-b"

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"

    export NOTIFY_SOCKET=$PODMAN_TMPDIR/conmon.sock
    _start_socat

    # Run `play kube` in the background as it will wait for all containers to
    # send the READY=1 message.
    timeout --foreground -v --kill=10 60 \
        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &

    # Wait for both containers to be running
    for i in $(seq 1 20); do
        run_podman "?" container wait $container_a $container_b --condition="running"
        if [[ $status == 0 ]]; then
            break
        fi
        sleep 0.5
        # Just for debugging
        run_podman ps -a
    done
    if [[ $status != 0 ]]; then
        die "container $container_a and/or $container_b did not start"
    fi

    # Make sure the containers have the correct policy
    run_podman container inspect $container_a $container_b $service_container --format "{{.Config.SdNotifyMode}}"
    is "$output" "container
conmon
ignore"

    is "$(< $_SOCAT_LOG)" "" "nothing received on socket"

    # Make sure the container received a "proxy" socket and is not using the
    # one from `kube play`
    run_podman container inspect $container_a --format "{{.Config.SdNotifySocket}}"
    assert "$output" != $NOTIFY_SOCKET

    run_podman logs $container_a
    is "${lines[0]}" "/run/notify/notify.sock" "NOTIFY_SOCKET is passed to container"

    # Instruct the container to send the READY message
    run_podman exec $container_a /bin/touch /stop

    run_podman container inspect $service_container --format "{{.State.ConmonPid}}"
    main_pid="$output"

    run_podman container wait $container_a

    wait_for_file $_SOCAT_LOG
    # The 'echo's help us debug failed runs
    run cat $_SOCAT_LOG
    echo "socat log:"
    echo "$output"
    is "$output" "MAINPID=$main_pid
READY=1" "sdnotify sent MAINPID and READY"

    _stop_socat

    # Clean up pod and pause image
    run_podman play kube --down $yaml_source
    run_podman rmi $_FEDORA $(pause_image)
}
# vim: filetype=sh