Diffstat (limited to 'test/system')
 test/system/200-pod.bats     |   2
 test/system/410-selinux.bats |   2
 test/system/710-kube.bats    | 171
 test/system/helpers.bash     |  14
 4 files changed, 173 insertions(+), 16 deletions(-)
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index b1b9ee5e1..b9063ad1b 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -478,7 +478,7 @@ spec:
}
@test "pod resource limits" {
- # FIXME: #15074 - possible flake on aarch64
+ skip_if_aarch64 "FIXME: #15074 - flakes on aarch64 non-remote"
skip_if_remote "resource limits only implemented on non-remote"
skip_if_rootless "resource limits only work with root"
skip_if_cgroupsv1 "resource limits only meaningful on cgroups V2"
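
Aside: skip_if_aarch64 is provided by test/system/helpers.bash and its definition is not shown in this diff. Purely as an illustration of the idea, a hypothetical sketch (not the real helper):

    # hypothetical sketch -- the actual skip_if_aarch64 lives in helpers.bash
    function skip_if_aarch64() {
        if [[ "$(uname -m)" == "aarch64" ]]; then
            skip "${1:-test not applicable on aarch64}"
        fi
    }
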
diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats
index 082482c7a..cc86f282a 100644
--- a/test/system/410-selinux.bats
+++ b/test/system/410-selinux.bats
@@ -212,7 +212,7 @@ function check_label() {
# https://github.com/opencontainers/selinux/pull/148/commits/a5dc47f74c56922d58ead05d1fdcc5f7f52d5f4e
# from failed to set /proc/self/attr/keycreate on procfs
# to write /proc/self/attr/keycreate: invalid argument
- runc) expect="OCI runtime error: .*: \(failed to set|write\) /proc/self/attr/keycreate" ;;
+ runc) expect="OCI runtime error: .*: \(failed to set\|write\) /proc/self/attr/keycreate.*" ;;
*) skip "Unknown runtime '$runtime'";;
esac
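
Aside: the added \| escape matters because this pattern appears to be matched as a basic regular expression (the escaped \( \) grouping on the same line suggests the suite's is() helper goes through expr-style BREs, where a bare | is a literal character, so the old pattern could match neither runc message). A quick illustration, assuming expr-style matching:

    # BRE alternation must be written \| ; an unescaped | is taken literally.
    expr "write /proc/self/attr/keycreate: invalid argument" \
         : "\(failed to set\|write\) /proc"    # prints "write" (match)
    expr "write /proc/self/attr/keycreate: invalid argument" \
         : "\(failed to set|write\) /proc"     # prints nothing, exits 1 (no match)
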
diff --git a/test/system/710-kube.bats b/test/system/710-kube.bats
new file mode 100644
index 000000000..58e42148a
--- /dev/null
+++ b/test/system/710-kube.bats
@@ -0,0 +1,171 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# Test podman kube generate
+#
+
+load helpers
+
+# standard capability drop list
+capabilities='{"drop":["CAP_MKNOD","CAP_NET_RAW","CAP_AUDIT_WRITE"]}'
+
+# Warning that is emitted once on containers, multiple times on pods
+kubernetes_63='Truncation Annotation: .* Kubernetes only allows 63 characters'
+
+# filter: convert yaml to json, because bash+yaml=madness
+function yaml2json() {
+ egrep -v "$kubernetes_63" | python3 -c 'import yaml
+import json
+import sys
+json.dump(yaml.safe_load(sys.stdin), sys.stdout)'
+}
+
+###############################################################################
+# BEGIN tests
+
+@test "podman kube generate - usage message" {
+ run_podman kube generate --help
+ is "$output" ".*podman.* kube generate \[options\] {CONTAINER...|POD...|VOLUME...}"
+ run_podman generate kube --help
+ is "$output" ".*podman.* generate kube \[options\] {CONTAINER...|POD...|VOLUME...}"
+}
+
+@test "podman kube generate - container" {
+ cname=c$(random_string 15)
+ run_podman container create --name $cname $IMAGE top
+ run_podman kube generate $cname
+
+ # Convert yaml to json, and dump to stdout (to help in case of errors)
+ json=$(yaml2json <<<"$output")
+ jq . <<<"$json"
+
+ # What we expect to see. This is by necessity an incomplete list.
+ # For instance, it does not include org.opencontainers.image.base.*
+ # because sometimes we get that, sometimes we don't. No clue why.
+ #
+ # And, unfortunately, if new fields are added to the YAML, we won't
+ # test those unless a developer remembers to add them here.
+ #
+ # Reasons for doing it this way, instead of straight-comparing yaml:
+ # 1) the arbitrariness of the org.opencontainers.image.base annotations
+ # 2) YAML order is nondeterministic, so on a pod with two containers
+ # (as in the pod test below) we cannot rely on cname1/cname2.
+ expect="
+apiVersion | = | v1
+kind | = | Pod
+
+metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname\" | = | false
+metadata.annotations.\"io.podman.annotations.autoremove/$cname\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.init/$cname\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.privileged/$cname\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.publish-all/$cname\" | = | FALSE
+
+metadata.creationTimestamp | =~ | [0-9T:-]\\+Z
+metadata.labels.app | = | ${cname}-pod
+metadata.name | = | ${cname}-pod
+
+spec.containers[0].command | = | [\"top\"]
+spec.containers[0].image | = | $IMAGE
+spec.containers[0].name | = | $cname
+
+spec.containers[0].securityContext.capabilities | = | $capabilities
+
+status | = | null
+"
+
+ # Parse and check all those
+ while read key op expect; do
+ actual=$(jq -r -c ".$key" <<<"$json")
+ assert "$actual" $op "$expect" ".$key"
+ done < <(parse_table "$expect")
+
+ if ! is_remote; then
+ count=$(egrep -c "$kubernetes_63" <<<"$output")
+ assert "$count" = 1 "1 instance of the Kubernetes-63-char warning"
+ fi
+
+ run_podman rm $cname
+}
+
+@test "podman kube generate - pod" {
+ local pname=p$(random_string 15)
+ local cname1=c1$(random_string 15)
+ local cname2=c2$(random_string 15)
+
+ run_podman pod create --name $pname --publish 9999:8888
+
+ # Needs at least one container. Error is slightly different between
+ # regular and remote podman:
+ # regular: Error: pod ... only has...
+ # remote: Error: error generating YAML: pod ... only has...
+ run_podman 125 kube generate $pname
+ assert "$output" =~ "Error: .* only has an infra container"
+
+ run_podman container create --name $cname1 --pod $pname $IMAGE top
+ run_podman container create --name $cname2 --pod $pname $IMAGE bottom
+ run_podman kube generate $pname
+
+ json=$(yaml2json <<<"$output")
+ jq . <<<"$json"
+
+ # See container test above for description of this table
+ expect="
+apiVersion | = | v1
+kind | = | Pod
+
+metadata.annotations.\"io.kubernetes.cri-o.ContainerType/$cname1\" | = | container
+metadata.annotations.\"io.kubernetes.cri-o.ContainerType/$cname2\" | = | container
+metadata.annotations.\"io.kubernetes.cri-o.SandboxID/$cname1\" | =~ | [0-9a-f]\\{56\\}
+metadata.annotations.\"io.kubernetes.cri-o.SandboxID/$cname2\" | =~ | [0-9a-f]\\{56\\}
+metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname1\" | = | false
+metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname2\" | = | false
+metadata.annotations.\"io.podman.annotations.autoremove/$cname1\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.autoremove/$cname2\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.init/$cname1\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.init/$cname2\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.privileged/$cname1\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.privileged/$cname2\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.publish-all/$cname1\" | = | FALSE
+metadata.annotations.\"io.podman.annotations.publish-all/$cname2\" | = | FALSE
+
+metadata.creationTimestamp | =~ | [0-9T:-]\\+Z
+metadata.labels.app | = | ${pname}
+metadata.name | = | ${pname}
+
+spec.hostname | = | $pname
+spec.restartPolicy | = | Never
+
+spec.containers[0].command | = | [\"top\"]
+spec.containers[0].image | = | $IMAGE
+spec.containers[0].name | = | $cname1
+spec.containers[0].ports[0].containerPort | = | 8888
+spec.containers[0].ports[0].hostPort | = | 9999
+spec.containers[0].resources | = | {}
+
+spec.containers[1].command | = | [\"bottom\"]
+spec.containers[1].image | = | $IMAGE
+spec.containers[1].name | = | $cname2
+spec.containers[1].ports | = | null
+spec.containers[1].resources | = | {}
+
+spec.containers[0].securityContext.capabilities | = | $capabilities
+
+status | = | {}
+"
+
+ while read key op expect; do
+ actual=$(jq -r -c ".$key" <<<"$json")
+ assert "$actual" $op "$expect" ".$key"
+ done < <(parse_table "$expect")
+
+ # Why 4? Maybe two for each container?
+ if ! is_remote; then
+ count=$(egrep -c "$kubernetes_63" <<<"$output")
+ assert "$count" = 4 "instances of the Kubernetes-63-char warning"
+ fi
+
+ run_podman rm $cname1 $cname2
+ run_podman pod rm $pname
+ run_podman rmi $(pause_image)
+}
+
+# vim: filetype=sh
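
Aside: the expect tables in the new tests are consumed by the suite's existing parse_table helper (defined in test/system/helpers.bash, not part of this diff), which emits one whitespace-separated row per line so that `read key op expect` can split the fields. A rough, simplified sketch of the idea, not the real implementation:

    # hypothetical simplification of parse_table: drop blank rows and
    # collapse the " | " separators; the real helper also handles
    # backslash escapes and comment lines.
    function parse_table_sketch() {
        sed -e '/^ *$/d' -e 's/ *| */ /g' <<<"$1"
    }

Each emitted row then drives one jq lookup (`jq -r -c ".$key"`) and a single assert against the expected value.
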
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 5ff3fae6d..f2eb3016c 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -36,20 +36,6 @@ fi
# That way individual tests can override with their own setup/teardown,
# while retaining the ability to include these if they so desire.
-# Some CI systems set this to runc, overriding the default crun.
-if [[ -n $OCI_RUNTIME ]]; then
- if [[ -z $CONTAINERS_CONF ]]; then
- # FIXME: BATS provides no mechanism for end-of-run cleanup[1]; how
- # can we avoid leaving this file behind when we finish?
- # [1] https://github.com/bats-core/bats-core/issues/39
- export CONTAINERS_CONF=$(mktemp --tmpdir=${BATS_TMPDIR:-/tmp} podman-bats-XXXXXXX.containers.conf)
- cat >$CONTAINERS_CONF <<EOF
-[engine]
-runtime="$OCI_RUNTIME"
-EOF
- fi
-fi
-
# Setup helper: establish a test environment with exactly the images needed
function basic_setup() {
# Clean up all containers
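
Aside: the deleted block above generated a throwaway containers.conf on the fly whenever $OCI_RUNTIME was set. After this change, a CI system that wants to run the suite under runc has to supply its own configuration, for example (an assumed CI-side workflow, not something this patch adds; CI_TMPDIR is a placeholder):

    # assumed replacement on the CI side, outside the test harness
    cat > "$CI_TMPDIR/containers.conf" <<EOF
    [engine]
    runtime="runc"
    EOF
    export CONTAINERS_CONF="$CI_TMPDIR/containers.conf"
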