Diffstat (limited to 'test')
-rw-r--r--  test/apiv2/12-imagesMore.at   |  44
-rw-r--r--  test/e2e/config.go            |   1
-rw-r--r--  test/e2e/network_test.go      |  37
-rw-r--r--  test/e2e/search_test.go       |  20
-rw-r--r--  test/e2e/toolbox_test.go      | 368
-rw-r--r--  test/system/010-images.bats   |  52
-rw-r--r--  test/system/060-mount.bats    |  13
-rw-r--r--  test/system/130-kill.bats     |  20
-rw-r--r--  test/system/410-selinux.bats  | 108
-rw-r--r--  test/system/helpers.bash      |  11
10 files changed, 649 insertions, 25 deletions
diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at
new file mode 100644
index 000000000..30ccf0cfc
--- /dev/null
+++ b/test/apiv2/12-imagesMore.at
@@ -0,0 +1,44 @@
+# -*- sh -*-
+#
+# Tests for more image-related endpoints
+#
+
+podman pull -q $IMAGE
+
+t GET libpod/images/json 200 \
+ .[0].Id~[0-9a-f]\\{64\\}
+iid=$(jq -r '.[0].Id' <<<"$output")
+
+# Retrieve the image tree
+t GET libpod/images/$IMAGE/tree 200 \
+ .Tree~^Image
+
+# Tag nonesuch image
+t POST "libpod/images/nonesuch/tag?repo=myrepo&tag=mytag" '' 404
+
+# Tag the image
+t POST "libpod/images/$IMAGE/tag?repo=localhost:5000/myrepo&tag=mytag" '' 201
+
+t GET libpod/images/$IMAGE/json 200 \
+ .RepoTags[1]=localhost:5000/myrepo:mytag
+
+# Run registry container
+podman run -d --name registry -p 5000:5000 docker.io/library/registry:2.6 /entrypoint.sh /etc/docker/registry/config.yml
+
+# Push to local registry
+t POST libpod/images/localhost:5000/myrepo:mytag/push\?tlsVerify\=false '' 200
+
+# Untag the image
+t POST "libpod/images/$iid/untag?repo=localhost:5000/myrepo&tag=mytag" '' 201
+
+t GET libpod/images/$IMAGE/json 200 \
+ .RepoTags[-1]=$IMAGE
+
+# Remove the registry container
+t DELETE libpod/containers/registry?force=true 204
+
+# Remove images
+t DELETE libpod/images/$IMAGE 200 \
+ .ExitCode=0
+t DELETE libpod/images/docker.io/library/registry:2.6 200 \
+ .ExitCode=0
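The 't' helper above belongs to the apiv2 test harness: it issues the request against the running Podman service, checks the HTTP status, and evaluates the jq-style field assertions. A rough hand-rolled sketch of the same kind of check follows; the socket path, temp file, and error handling are illustrative assumptions, not the harness's actual implementation:

    # Approximate equivalent of: t GET libpod/images/json 200 .[0].Id~[0-9a-f]\{64\}
    sock=/run/podman/podman.sock     # assumed rootful API socket
    status=$(curl -s --unix-socket $sock -o /tmp/images.json \
                  -w '%{http_code}' http://d/libpod/images/json)
    test "$status" = "200" || echo "expected 200, got $status"
    jq -e '.[0].Id | test("^[0-9a-f]{64}$")' /tmp/images.json >/dev/null \
        || echo ".[0].Id does not look like a 64-character hex image ID"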
diff --git a/test/e2e/config.go b/test/e2e/config.go
index 49a47c7da..54e39f9d2 100644
--- a/test/e2e/config.go
+++ b/test/e2e/config.go
@@ -14,6 +14,7 @@ var (
BB = "docker.io/library/busybox:latest"
healthcheck = "docker.io/libpod/alpine_healthcheck:latest"
ImageCacheDir = "/tmp/podman/imagecachedir"
+ fedoraToolbox = "registry.fedoraproject.org/f32/fedora-toolbox:latest"
// This image has seccomp profiles that blocks all syscalls.
// The intention behind blocking all syscalls is to prevent
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index cbfd72da6..9bd16c008 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -211,6 +211,43 @@ var _ = Describe("Podman network", func() {
Expect(rmAll.ExitCode()).To(BeZero())
})
+ It("podman inspect container two CNI networks (container not running)", func() {
+ netName1 := "testNetThreeCNI1"
+ network1 := podmanTest.Podman([]string{"network", "create", netName1})
+ network1.WaitWithDefaultTimeout()
+ Expect(network1.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork(netName1)
+
+ netName2 := "testNetThreeCNI2"
+ network2 := podmanTest.Podman([]string{"network", "create", netName2})
+ network2.WaitWithDefaultTimeout()
+ Expect(network2.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork(netName2)
+
+ ctrName := "testCtr"
+ container := podmanTest.Podman([]string{"create", "--network", fmt.Sprintf("%s,%s", netName1, netName2), "--name", ctrName, ALPINE, "top"})
+ container.WaitWithDefaultTimeout()
+ Expect(container.ExitCode()).To(BeZero())
+
+ inspect := podmanTest.Podman([]string{"inspect", ctrName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(BeZero())
+ conData := inspect.InspectContainerToJSON()
+ Expect(len(conData)).To(Equal(1))
+ Expect(len(conData[0].NetworkSettings.Networks)).To(Equal(2))
+ net1, ok := conData[0].NetworkSettings.Networks[netName1]
+ Expect(ok).To(BeTrue())
+ Expect(net1.NetworkID).To(Equal(netName1))
+ net2, ok := conData[0].NetworkSettings.Networks[netName2]
+ Expect(ok).To(BeTrue())
+ Expect(net2.NetworkID).To(Equal(netName2))
+
+ // Necessary to ensure the CNI network is removed cleanly
+ rmAll := podmanTest.Podman([]string{"rm", "-f", ctrName})
+ rmAll.WaitWithDefaultTimeout()
+ Expect(rmAll.ExitCode()).To(BeZero())
+ })
+
It("podman inspect container two CNI networks", func() {
netName1 := "testNetTwoCNI1"
network1 := podmanTest.Podman([]string{"network", "create", "--subnet", "10.50.51.0/25", netName1})
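A CLI-level sketch of what the new not-running-container test asserts: a container created (but never started) with two CNI networks should already show one entry per network, keyed by network name, in 'podman inspect'. The names below are examples and the Go template simply mirrors the JSON fields the test reads; it is an illustration, not part of the test suite:

    podman network create net1
    podman network create net2
    podman create --network net1,net2 --name demo alpine top
    podman inspect demo --format \
        '{{range $name, $net := .NetworkSettings.Networks}}{{$name}}={{$net.NetworkID}}{{"\n"}}{{end}}'
    podman rm -f demo
    podman network rm net1 net2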
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 043da9059..0cf005529 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -423,4 +423,24 @@ registries = ['{{.Host}}:{{.Port}}']`
Expect(search.ExitCode()).To(Equal(0))
Expect(len(search.OutputToStringArray()) > 1).To(BeTrue())
})
+
+ It("podman search repository tags", func() {
+ search := podmanTest.Podman([]string{"search", "--list-tags", "--limit", "30", "docker.io/library/alpine"})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Equal(0))
+ Expect(len(search.OutputToStringArray())).To(Equal(31))
+
+ search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/alpine"})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Equal(0))
+ Expect(len(search.OutputToStringArray()) > 2).To(BeTrue())
+
+ search = podmanTest.Podman([]string{"search", "--filter=is-official", "--list-tags", "docker.io/library/alpine"})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Not(Equal(0)))
+
+ search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/"})
+ search.WaitWithDefaultTimeout()
+ Expect(len(search.OutputToStringArray()) == 0).To(BeTrue())
+ })
})
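The new --list-tags assertions map directly onto the CLI; the expectation of 31 output lines is the table header plus the 30 requested tags. A hedged illustration of the output shape (the tags shown are examples and will differ over time):

    podman search --list-tags --limit 3 docker.io/library/alpine
    # NAME                       TAG
    # docker.io/library/alpine   latest
    # docker.io/library/alpine   edge
    # docker.io/library/alpine   3.12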
diff --git a/test/e2e/toolbox_test.go b/test/e2e/toolbox_test.go
new file mode 100644
index 000000000..6122cee19
--- /dev/null
+++ b/test/e2e/toolbox_test.go
@@ -0,0 +1,368 @@
+package integration
+
+/*
+ toolbox_test.go is under the care of the Toolbox Team.
+
+ The tests are trying to stress parts of Podman that Toolbox[0] needs for
+ its functionality.
+
+ [0] https://github.com/containers/toolbox
+
+ Info about test cases:
+ - some tests rely on a certain configuration of a container that is done by
+ executing several commands in the entry-point of a container. To make
+    sure the initialization has had enough time to run, call
+    WaitContainerReady() after the container is started.
+
+  - in several places there's an invocation of 'podman logs'. It is there mainly
+ to ease debugging when a test goes wrong (during the initialization of a
+ container) but sometimes it is also used in the test case itself.
+
+ Maintainers (Toolbox Team):
+ - Ondřej Míchal <harrymichal@fedoraproject.org>
+ - Debarshi Ray <rishi@fedoraproject.org>
+
+ Also available on Freenode IRC on #silverblue or #podman
+*/
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+ "syscall"
+
+ . "github.com/containers/podman/v2/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Toolbox-specific testing", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+ })
+
+ It("podman run --dns=none - allows self-management of /etc/resolv.conf", func() {
+ var session *PodmanSessionIntegration
+
+ session = podmanTest.Podman([]string{"run", "--dns", "none", ALPINE, "sh", "-c",
+ "rm -f /etc/resolv.conf; touch -d '1970-01-01 00:02:03' /etc/resolv.conf; stat -c %s:%Y /etc/resolv.conf"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("0:123"))
+ })
+
+ It("podman run --no-hosts - allows self-management of /etc/hosts", func() {
+ var session *PodmanSessionIntegration
+
+ session = podmanTest.Podman([]string{"run", "--no-hosts", ALPINE, "sh", "-c",
+ "rm -f /etc/hosts; touch -d '1970-01-01 00:02:03' /etc/hosts; stat -c %s:%Y /etc/hosts"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("0:123"))
+ })
+
+ It("podman create --ulimit host + podman exec - correctly mirrors hosts ulimits", func() {
+ if podmanTest.RemoteTest {
+ Skip("Ulimit check does not work with a remote client")
+ }
+ var session *PodmanSessionIntegration
+ var containerHardLimit int
+ var rlimit syscall.Rlimit
+ var err error
+
+ err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
+ Expect(err).To(BeNil())
+ fmt.Printf("Expected value: %d", rlimit.Max)
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--ulimit", "host", ALPINE,
+ "sleep", "1000"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"exec", "test", "sh", "-c",
+ "ulimit -H -n"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ containerHardLimit, err = strconv.Atoi(strings.Trim(session.OutputToString(), "\n"))
+ Expect(err).To(BeNil())
+ Expect(containerHardLimit).To(BeNumerically(">=", rlimit.Max))
+ })
+
+ It("podman create --ipc=host --pid=host + podman exec - correct shared memory limit size", func() {
+		// Compare the size of /dev/shm on the host with the size of /dev/shm
+		// in the container; they should be (roughly) equal.
+ if podmanTest.RemoteTest {
+ Skip("Shm size check does not work with a remote client")
+ }
+ var session *PodmanSessionIntegration
+ var cmd *exec.Cmd
+ var hostShmSize, containerShmSize int
+ var err error
+
+ // Because Alpine uses busybox, most commands don't offer advanced options
+		// like "--output" in df. Therefore the value of the field 'Size' (or
+		// '1K-blocks') needs to be extracted manually.
+ cmd = exec.Command("df", "/dev/shm")
+ res, err := cmd.Output()
+ Expect(err).To(BeNil())
+ lines := strings.SplitN(string(res), "\n", 2)
+ fields := strings.Fields(lines[len(lines)-1])
+ hostShmSize, err = strconv.Atoi(fields[1])
+ Expect(err).To(BeNil())
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--ipc=host", "--pid=host", ALPINE,
+ "sleep", "1000"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"exec", "test",
+ "df", "/dev/shm"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ lines = session.OutputToStringArray()
+ fields = strings.Fields(lines[len(lines)-1])
+ containerShmSize, err = strconv.Atoi(fields[1])
+ Expect(err).To(BeNil())
+
+		// The size of /dev/shm in the container may not exactly match the
+		// size on the host. Therefore allow a small tolerance between the
+		// compared values.
+ Expect(hostShmSize).To(BeNumerically("~", containerShmSize, 100))
+ })
+
+ It("podman create --userns=keep-id --user root:root - entrypoint - entrypoint is executed as root", func() {
+ var session *PodmanSessionIntegration
+
+ session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--user", "root:root", ALPINE,
+ "id"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("uid=0(root) gid=0(root)"))
+ })
+
+ It("podman create --userns=keep-id + podman exec - correct names of user and group", func() {
+ var session *PodmanSessionIntegration
+ var err error
+
+ currentUser, err := user.Current()
+ Expect(err).To(BeNil())
+
+ currentGroup, err := user.LookupGroupId(currentUser.Gid)
+ Expect(err).To(BeNil())
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", ALPINE,
+ "sleep", "1000"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(err).To(BeNil())
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ expectedOutput := fmt.Sprintf("uid=%s(%s) gid=%s(%s)",
+ currentUser.Uid, currentUser.Username,
+ currentGroup.Gid, currentGroup.Name)
+
+ session = podmanTest.Podman([]string{"exec", "test",
+ "id"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(expectedOutput))
+ })
+
+ It("podman create --userns=keep-id - entrypoint - adding user with useradd and then removing their password", func() {
+ var session *PodmanSessionIntegration
+
+ var username string = "testuser"
+ var homeDir string = "/home/testuser"
+ var shell string = "/bin/sh"
+ var uid string = "1001"
+ var gid string = "1001"
+
+ useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s",
+ homeDir, shell, uid, username)
+ passwd := fmt.Sprintf("passwd --delete %s", username)
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ fmt.Sprintf("%s; %s; echo READY; sleep 1000", useradd, passwd)})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue())
+
+ expectedOutput := fmt.Sprintf("%s:x:%s:%s::%s:%s",
+ username, uid, gid, homeDir, shell)
+
+ session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(expectedOutput))
+
+ expectedOutput = "passwd: Note: deleting a password also unlocks the password."
+
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(expectedOutput))
+ })
+
+ It("podman create --userns=keep-id + podman exec - adding group with groupadd", func() {
+ var session *PodmanSessionIntegration
+
+ var groupName string = "testgroup"
+ var gid string = "1001"
+
+ groupadd := fmt.Sprintf("groupadd --gid %s %s", gid, groupName)
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ fmt.Sprintf("%s; echo READY; sleep 1000", groupadd)})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/group"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(groupName))
+
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("READY"))
+ })
+
+ It("podman create --userns=keep-id - entrypoint - modifying existing user with usermod - add to new group, change home/shell/uid", func() {
+ var session *PodmanSessionIntegration
+ var badHomeDir string = "/home/badtestuser"
+ var badShell string = "/bin/sh"
+ var badUID string = "1001"
+ var username string = "testuser"
+ var homeDir string = "/home/testuser"
+ var shell string = "/bin/bash"
+ var uid string = "2000"
+ var groupName string = "testgroup"
+ var gid string = "2000"
+
+		// The use of bad* in the variable names does not imply that the invocation
+		// of useradd should fail. The user is supposed to be created successfully,
+		// but its information (uid, home, shell, ...) is later changed via usermod.
+ useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s",
+ badHomeDir, badShell, badUID, username)
+ groupadd := fmt.Sprintf("groupadd --gid %s %s",
+ gid, groupName)
+ usermod := fmt.Sprintf("usermod --append --groups wheel --home %s --shell %s --uid %s --gid %s %s",
+ homeDir, shell, uid, gid, username)
+
+ session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c",
+ fmt.Sprintf("%s; %s; %s; echo READY; sleep 1000", useradd, groupadd, usermod)})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue())
+
+ expectedUser := fmt.Sprintf("%s:x:%s:%s::%s:%s",
+ username, uid, gid, homeDir, shell)
+
+ session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(expectedUser))
+
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("READY"))
+ })
+
+ It("podman run --privileged --userns=keep-id --user root:root - entrypoint - (bind)mounting", func() {
+ var session *PodmanSessionIntegration
+
+ session = podmanTest.Podman([]string{"run", "--privileged", "--userns=keep-id", "--user", "root:root", ALPINE,
+ "mount", "-t", "tmpfs", "tmpfs", "/tmp"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"run", "--privileged", "--userns=keep-id", "--user", "root:root", ALPINE,
+ "mount", "--rbind", "/tmp", "/var/tmp"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ It("podman create + start - with all needed switches for create - sleep as entry-point", func() {
+ var session *PodmanSessionIntegration
+
+ // These should be most of the switches that Toolbox uses to create a "toolbox" container
+ // https://github.com/containers/toolbox/blob/master/src/cmd/create.go
+ session = podmanTest.Podman([]string{"create",
+ "--dns", "none",
+ "--hostname", "toolbox",
+ "--ipc", "host",
+ "--label", "com.github.containers.toolbox=true",
+ "--name", "test",
+ "--network", "host",
+ "--no-hosts",
+ "--pid", "host",
+ "--privileged",
+ "--security-opt", "label=disable",
+ "--ulimit", "host",
+ "--userns=keep-id",
+ "--user", "root:root",
+ fedoraToolbox, "sh", "-c", "echo READY; sleep 1000"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"start", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"logs", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("READY"))
+ })
+})
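The header comment of this file refers to WaitContainerReady(), which the tests call to wait for the 'echo READY' marker emitted by each entry-point before exec'ing into the container. A shell sketch of that polling pattern, assuming the (retries, delay) semantics implied by the calls above; the real helper lives in the Go test utilities and may differ in detail:

    wait_container_ready() {
        local ctr=$1 marker=$2 retries=${3:-5} delay=${4:-1}
        while [ "$retries" -gt 0 ]; do
            if podman logs "$ctr" 2>/dev/null | grep -q "$marker"; then
                return 0
            fi
            sleep "$delay"
            retries=$((retries - 1))
        done
        return 1
    }

    # e.g. wait_container_ready test READY 2 1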
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index ac65e54d9..900a24368 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -159,4 +159,56 @@ Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z
is "$output" "$images_baseline" "after podman rmi @sha, still the same"
}
+# Tests #7199 (Restore "table" --format from V1)
+#
+# Tag our image with different-length strings; confirm table alignment
+@test "podman images - table format" {
+ # Craft two tags such that they will bracket $IMAGE on either side (above
+ # and below). This assumes that $IMAGE is quay.io or foo.com or simply
+ # not something insane that will sort before 'aaa' or after 'zzz'.
+ local aaa_name=a.b/c
+ local aaa_tag=d
+ local zzz_name=zzzzzzzzzz.yyyyyyyyy/xxxxxxxxx
+ local zzz_tag=$(random_string 15)
+
+ # Helper function to check one line of tabular output; all this does is
+ # generate a line with the given repo/tag, formatted to the width of the
+ # widest image, which is the zzz one. Fields are separated by TWO spaces.
+ function _check_line() {
+ local lineno=$1
+ local name=$2
+ local tag=$3
+
+ is "${lines[$lineno]}" \
+ "$(printf '%-*s %-*s %s' ${#zzz_name} ${name} ${#zzz_tag} ${tag} $iid)" \
+ "podman images, $testname, line $lineno"
+ }
+
+ function _run_format_test() {
+ local testname=$1
+ local format=$2
+
+ run_podman images --sort repository --format "$format"
+ _check_line 0 ${aaa_name} ${aaa_tag}
+ _check_line 1 "${PODMAN_TEST_IMAGE_REGISTRY}/${PODMAN_TEST_IMAGE_USER}/${PODMAN_TEST_IMAGE_NAME}" "${PODMAN_TEST_IMAGE_TAG}"
+ _check_line 2 ${zzz_name} ${zzz_tag}
+ }
+
+ # Begin the test: tag $IMAGE with both the given names
+ run_podman tag $IMAGE ${aaa_name}:${aaa_tag}
+ run_podman tag $IMAGE ${zzz_name}:${zzz_tag}
+
+ # Get the image ID, used to verify output below (all images share same IID)
+ run_podman inspect --format '{{.ID}}' $IMAGE
+ iid=${output:0:12}
+
+ # Run the test: this will output three column-aligned rows. Test them.
+ # Tab character (\t) should have the same effect as the 'table' directive
+ _run_format_test 'table' 'table {{.Repository}} {{.Tag}} {{.ID}}'
+ _run_format_test 'tabs' '{{.Repository}}\t{{.Tag}}\t{{.ID}}'
+
+ # Clean up.
+ run_podman rmi ${aaa_name}:${aaa_tag} ${zzz_name}:${zzz_tag}
+}
+
# vim: filetype=sh
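The table-format test relies on printf's '%-*s' conversion, which takes the field width from the argument list; that is how _check_line() pads each NAME and TAG to the width of the longest (zzz) entries, with two spaces between fields. A standalone illustration with arbitrary values:

    # 'abc' is left-justified in a 10-column field, 'de' in a 5-column field,
    # and the fields are separated by two spaces, matching the layout the
    # test expects from 'podman images'.
    printf '%-*s  %-*s  %s\n' 10 abc 5 de 0123456789ab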
diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats
index f11aff773..ece87acf6 100644
--- a/test/system/060-mount.bats
+++ b/test/system/060-mount.bats
@@ -43,6 +43,11 @@ load helpers
# Start with clean slate
run_podman image umount -a
+ # Get full image ID, to verify umount
+ run_podman image inspect --format '{{.ID}}' $IMAGE
+ iid="$output"
+
+ # Mount, and make sure the mount point exists
run_podman image mount $IMAGE
mount_path="$output"
@@ -60,6 +65,14 @@ load helpers
# Clean up
run_podman image umount $IMAGE
+ is "$output" "$iid" "podman image umount: image ID of what was umounted"
+
+ run_podman image umount $IMAGE
+ is "$output" "" "podman image umount: does not re-umount"
+
+ run_podman 125 image umount no-such-container
+ is "$output" "Error: unable to find a name and tag match for no-such-container in repotags: no such image" \
+ "error message from image umount no-such-container"
run_podman image mount
is "$output" "" "podman image mount, no args, after umount"
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index c16e64c58..3770eac27 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -6,23 +6,9 @@
load helpers
@test "podman kill - test signal handling in containers" {
- # podman-remote and crun interact poorly in f31: crun seems to gobble up
- # some signals.
- # Workaround: run 'env --default-signal sh' instead of just 'sh' in
- # the container. Since env on our regular alpine image doesn't support
- # that flag, we need to pull fedora-minimal. See:
- # https://github.com/containers/podman/issues/5004
- # FIXME: remove this kludge once we get rid of podman-remote
- local _image=$IMAGE
- local _sh_cmd="sh"
- if is_remote; then
- _image=quay.io/libpod/fedora-minimal:latest
- _sh_cmd="env --default-signal sh"
- fi
-
# Start a container that will handle all signals by emitting 'got: N'
local -a signals=(1 2 3 4 5 6 8 10 12 13 14 15 16 20 21 22 23 24 25 26 64)
- run_podman run -d $_image $_sh_cmd -c \
+ run_podman run -d $IMAGE sh -c \
"for i in ${signals[*]}; do trap \"echo got: \$i\" \$i; done;
echo READY;
while ! test -e /stop; do sleep 0.05; done;
@@ -81,10 +67,6 @@ load helpers
run_podman wait $cid
run_podman rm $cid
wait $podman_log_pid
-
- if [[ $_image != $IMAGE ]]; then
- run_podman rmi $_image
- fi
}
@test "podman kill - rejects invalid args" {
diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats
index 497e29b3e..1e44fe06c 100644
--- a/test/system/410-selinux.bats
+++ b/test/system/410-selinux.bats
@@ -7,9 +7,7 @@ load helpers
function check_label() {
- if [ ! -e /usr/sbin/selinuxenabled ] || ! /usr/sbin/selinuxenabled; then
- skip "selinux disabled or not available"
- fi
+ skip_if_no_selinux
local args="$1"; shift # command-line args for run
@@ -52,15 +50,33 @@ function check_label() {
check_label "--privileged --userns=host" "spc_t"
}
+@test "podman selinux: pid=host" {
+ # FIXME FIXME FIXME: Remove these lines once all VMs have >= 2.146.0
+ # (this is ugly, but better than an unconditional skip)
+ skip_if_no_selinux
+ if is_rootless; then
+ if [ -x /usr/bin/rpm ]; then
+ cs_version=$(rpm -q --qf '%{version}' container-selinux)
+ else
+ # SELinux not enabled on Ubuntu, so we should never get here
+ die "WHOA! SELinux enabled, but no /usr/bin/rpm!"
+ fi
+ if [[ "$cs_version" < "2.146" ]]; then
+ skip "FIXME: #7939: requires container-selinux-2.146.0 (currently installed: $cs_version)"
+ fi
+ fi
+ # FIXME FIXME FIXME: delete up to here, leaving just check_label
+
+ check_label "--pid=host" "spc_t"
+}
+
@test "podman selinux: container with overridden range" {
check_label "--security-opt label=level:s0:c1,c2" "container_t" "s0:c1,c2"
}
# pr #6752
@test "podman selinux: inspect multiple labels" {
- if [ ! -e /usr/sbin/selinuxenabled ] || ! /usr/sbin/selinuxenabled; then
- skip "selinux disabled or not available"
- fi
+ skip_if_no_selinux
run_podman run -d --name myc \
--security-opt seccomp=unconfined \
@@ -75,4 +91,84 @@ function check_label() {
run_podman rm -f myc
}
+# Sharing context between two containers not in a pod
+# These tests were piggybacked in with #7902, but are not actually related
+@test "podman selinux: shared context in (some) namespaces" {
+ skip_if_no_selinux
+
+ run_podman run -d --name myctr $IMAGE top
+ run_podman exec myctr cat -v /proc/self/attr/current
+ context_c1="$output"
+
+ # --ipc container
+ run_podman run --name myctr2 --ipc container:myctr $IMAGE cat -v /proc/self/attr/current
+ is "$output" "$context_c1" "new container, run with ipc of existing one "
+
+ # --pid container
+ run_podman run --rm --pid container:myctr $IMAGE cat -v /proc/self/attr/current
+ is "$output" "$context_c1" "new container, run with --pid of existing one "
+
+ # net NS: do not share context
+ run_podman run --rm --net container:myctr $IMAGE cat -v /proc/self/attr/current
+ if [[ "$output" = "$context_c1" ]]; then
+ die "run --net : context ($output) is same as running container (it should not be)"
+ fi
+
+ # The 'myctr2' above was not run with --rm, so it still exists, and
+ # we can't remove the original container until this one is gone.
+ run_podman stop -t 0 myctr
+ run_podman 125 rm myctr
+ is "$output" "Error: container .* has dependent containers"
+
+ # We have to do this in two steps: even if ordered as 'myctr2 myctr',
+ # podman will try the removes in random order, which fails if it
+ # tries myctr first.
+ run_podman rm myctr2
+ run_podman rm myctr
+}
+
+# pr #7902 - containers in pods should all run under same context
+@test "podman selinux: containers in pods share full context" {
+ skip_if_no_selinux
+
+    # We don't need a full-blown pause container; avoid pulling the k8s one
+ run_podman pod create --name myselinuxpod \
+ --infra-image $IMAGE \
+ --infra-command /home/podman/pause
+
+ # Get baseline
+ run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current
+ context_c1="$output"
+
+ # Prior to #7902, the labels (':c123,c456') would be different
+ run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current
+ is "$output" "$context_c1" "SELinux context of 2nd container matches 1st"
+
+ # What the heck. Try a third time just for extra confidence
+ run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current
+ is "$output" "$context_c1" "SELinux context of 3rd container matches 1st"
+
+ run_podman pod rm myselinuxpod
+}
+
+# more pr #7902
+@test "podman selinux: containers in --no-infra pods do not share context" {
+ skip_if_no_selinux
+
+    # We don't need a full-blown pause container; avoid pulling the k8s one
+ run_podman pod create --name myselinuxpod --infra=false
+
+ # Get baseline
+ run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current
+ context_c1="$output"
+
+ # Even after #7902, labels (':c123,c456') should be different
+ run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current
+ if [[ "$output" = "$context_c1" ]]; then
+ die "context ($output) is the same on two separate containers, it should have been different"
+ fi
+
+ run_podman pod rm myselinuxpod
+}
+
# vim: filetype=sh
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 998db5283..c6c2c12df 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -286,6 +286,17 @@ function skip_if_remote() {
fi
}
+########################
+# skip_if_no_selinux #
+########################
+function skip_if_no_selinux() {
+ if [ ! -e /usr/sbin/selinuxenabled ]; then
+ skip "selinux not available"
+ elif ! /usr/sbin/selinuxenabled; then
+ skip "selinux disabled"
+ fi
+}
+
#########
# die # Abort with helpful message
#########
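For reference, a usage sketch of the new helper, mirroring how the 410-selinux.bats tests above call it at the top of every SELinux-dependent test (the test body here is illustrative only):

    @test "podman selinux: example" {
        skip_if_no_selinux
        run_podman run --rm $IMAGE cat -v /proc/self/attr/current
        # ...assert on $output here, as the tests in 410-selinux.bats do...
    }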