Diffstat (limited to 'test')
-rw-r--r--  test/README.md  106
-rw-r--r--  test/apparmor.bats  164
-rw-r--r--  test/bin2img/bin2img.go  226
-rw-r--r--  test/cgroups.bats  40
-rw-r--r--  test/checkseccomp/checkseccomp.go  18
-rw-r--r--  test/command.bats  12
-rw-r--r--  test/copyimg/copyimg.go  202
-rw-r--r--  test/ctr.bats  873
-rw-r--r--  test/default_mounts.bats  69
-rw-r--r--  test/helpers.bash  493
-rw-r--r--  test/hooks.bats  38
-rw-r--r--  test/hooks/checkhook.json  5
-rwxr-xr-x  test/hooks/checkhook.sh  4
-rw-r--r--  test/image.bats  250
-rw-r--r--  test/image_remove.bats  74
-rw-r--r--  test/image_volume.bats  68
-rw-r--r--  test/inspect.bats  72
-rw-r--r--  test/kpod_diff.bats  40
-rw-r--r--  test/kpod_export.bats  31
-rw-r--r--  test/kpod_history.bats  80
-rw-r--r--  test/kpod_images.bats  47
-rw-r--r--  test/kpod_inspect.bats  58
-rw-r--r--  test/kpod_kill.bats  83
-rw-r--r--  test/kpod_load.bats  84
-rw-r--r--  test/kpod_logs.bats  74
-rw-r--r--  test/kpod_mount.bats  48
-rw-r--r--  test/kpod_pause.bats  169
-rw-r--r--  test/kpod_ps.bats  313
-rw-r--r--  test/kpod_pull.bats  138
-rw-r--r--  test/kpod_push.bats  90
-rw-r--r--  test/kpod_rename.bats  33
-rw-r--r--  test/kpod_rm.bats  90
-rw-r--r--  test/kpod_save.bats  65
-rw-r--r--  test/kpod_stats.bats  104
-rw-r--r--  test/kpod_stop.bats  58
-rw-r--r--  test/kpod_tag.bats  50
-rw-r--r--  test/kpod_version.bats  13
-rw-r--r--  test/kpod_wait.bats  69
-rw-r--r--  test/network.bats  186
-rw-r--r--  test/pod.bats  365
-rw-r--r--  test/policy.json  23
-rw-r--r--  test/redhat_sigstore.yaml  3
-rw-r--r--  test/registries.conf  9
-rw-r--r--  test/restore.bats  267
-rw-r--r--  test/runtimeversion.bats  15
-rw-r--r--  test/seccomp.bats  368
-rw-r--r--  test/selinux.bats  26
-rwxr-xr-x  test/test_runner.sh  18
-rw-r--r--  test/testdata/README.md  15
-rw-r--r--  test/testdata/apparmor_test_deny_write  10
-rw-r--r--  test/testdata/container_config.json  70
-rw-r--r--  test/testdata/container_config_by_imageid.json  70
-rw-r--r--  test/testdata/container_config_hostport.json  72
-rw-r--r--  test/testdata/container_config_logging.json  72
-rw-r--r--  test/testdata/container_config_resolvconf.json  72
-rw-r--r--  test/testdata/container_config_resolvconf_ro.json  72
-rw-r--r--  test/testdata/container_config_seccomp.json  72
-rw-r--r--  test/testdata/container_config_sleep.json  71
-rw-r--r--  test/testdata/container_exit_test.json  22
-rw-r--r--  test/testdata/container_redis.json  61
-rw-r--r--  test/testdata/container_redis_default_mounts.json  67
-rw-r--r--  test/testdata/container_redis_device.json  68
-rw-r--r--  test/testdata/fake_ocid_default  1
-rw-r--r--  test/testdata/sandbox_config.json  51
-rw-r--r--  test/testdata/sandbox_config_hostnet.json  48
-rw-r--r--  test/testdata/sandbox_config_hostport.json  55
-rw-r--r--  test/testdata/sandbox_config_seccomp.json  53
-rw-r--r--  test/testdata/sandbox_config_selinux.json  48
-rw-r--r--  test/testdata/template_container_config.json  68
-rw-r--r--  test/testdata/template_sandbox_config.json  51
70 files changed, 6920 insertions, 0 deletions
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 000000000..1dd2e3c76
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,106 @@
+# CRIO Integration Tests
+
+Integration tests provide end-to-end testing of CRIO.
+
+Note that integration tests do **not** replace unit tests.
+
+As a rule of thumb, code should be tested thoroughly with unit tests.
+Integration tests, on the other hand, are meant to test a specific feature end
+to end.
+
+Integration tests are written in *bash* using the
+[bats](https://github.com/sstephenson/bats) framework.
+
+## Running integration tests
+
+### Containerized tests
+
+The easiest way to run integration tests is with Docker:
+```
+$ make integration
+```
+
+To run a single test bucket:
+```
+$ make integration TESTFLAGS="runtimeversion.bats"
+```
+
+### On your host
+
+To run the integration tests on your host, you will first need to set up a development environment plus
+[bats](https://github.com/sstephenson/bats#installing-bats-from-source).
+For example:
+```
+$ cd ~/go/src/github.com
+$ git clone https://github.com/sstephenson/bats.git
+$ cd bats
+$ ./install.sh /usr/local
+```
+
+You will also need to install the [CNI](https://github.com/containernetworking/cni) plugins as
+the default pod test template runs without host networking:
+
+```
+$ go get github.com/containernetworking/cni
+$ cd "$GOPATH/src/github.com/containernetworking/cni"
+$ git checkout -q d4bbce1865270cd2d2be558d6a23e63d314fe769
+$ ./build.sh
+$ mkdir -p /opt/cni/bin
+$ cp bin/* /opt/cni/bin/
+```
+
+Then you can run the tests on your host:
+```
+$ sudo make localintegration
+```
+
+To run a single test bucket:
+```
+$ make localintegration TESTFLAGS="runtimeversion.bats"
+```
+
+Or you can just run them directly using bats:
+```
+$ sudo bats test
+```
+
+#### Runtime selection
+Tests on the host will run with `runc` as the default runtime.
+However, you can select other OCI-compatible runtimes by setting
+the `RUNTIME` environment variable.
+
+For example, one could use the [Clear Containers](https://github.com/01org/cc-oci-runtime/wiki/Installation)
+runtime instead of `runc`:
+
+```
+make localintegration RUNTIME=cc-oci-runtime
+```
+
+## Writing integration tests
+
+[Helper functions](https://github.com/kubernetes-incubator/crio/blob/master/test/helpers.bash)
+are provided in order to facilitate writing tests.
+
+```sh
+#!/usr/bin/env bats
+
+# This will load the helpers.
+load helpers
+
+# setup is called at the beginning of every test.
+function setup() {
+}
+
+# teardown is called at the end of every test.
+function teardown() {
+ cleanup_test
+}
+
+@test "crioctl runtimeversion" {
+ start_crio
+ run crioctl runtimeversion
+ [ "$status" -eq 0 ]
+}
+
+```
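For reference, the pod and container tests added in the rest of this patch follow a common pattern on top of these helpers. A minimal sketch of that pattern (using `start_crio`, `crioctl`, `cleanup_test`, and the `$TESTDATA/sandbox_config.json` fixture shipped below; the test name here is illustrative only):

```sh
#!/usr/bin/env bats

load helpers

function teardown() {
	cleanup_test
}

@test "pod lifecycle sketch" {
	start_crio
	# `run` captures the exit code in $status and the output in $output.
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run crioctl pod stop --id "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run crioctl pod remove --id "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_pods
	stop_crio
}
```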
diff --git a/test/apparmor.bats b/test/apparmor.bats
new file mode 100644
index 000000000..e5c89bf0a
--- /dev/null
+++ b/test/apparmor.bats
@@ -0,0 +1,164 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+# 1. test running with the default apparmor profile loaded.
+# test that a container can run with the default apparmor profile, which does not block touching a file in `.`
+@test "load default apparmor profile and run a container with it" {
+ # this test requires apparmor, so skip this test if apparmor is not enabled.
+ enabled=$(is_apparmor_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since apparmor is not enabled."
+ fi
+
+ start_crio
+
+ sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname1": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor1.json
+
+ run crioctl pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" touch test.txt
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 2. test running with a specific apparmor profile set as the crio default apparmor profile.
+# test that a container runs with a specific apparmor profile, set as the crio default, which blocks touching a file in `.`
+@test "load a specific apparmor profile as default apparmor and run a container with it" {
+ # this test requires apparmor, so skip this test if apparmor is not enabled.
+ enabled=$(is_apparmor_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since apparmor is not enabled."
+ fi
+
+ load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
+ start_crio "" "$APPARMOR_TEST_PROFILE_NAME"
+
+ sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname2": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor2.json
+
+ run crioctl pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" touch test.txt
+ echo "$output"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "Permission denied" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+ remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
+}
+
+# 3. test running with a specific apparmor profile loaded, but not as the crio default apparmor profile.
+# test that a container runs with a specific apparmor profile which blocks touching a file in `.`
+@test "load default apparmor profile and run a container with another apparmor profile" {
+ # this test requires apparmor, so skip this test if apparmor is not enabled.
+ enabled=$(is_apparmor_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since apparmor is not enabled."
+ fi
+
+ load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
+ start_crio
+
+ sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname3": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor3.json
+
+ run crioctl pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" touch test.txt
+ echo "$output"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "Permission denied" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+ remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
+}
+
+# 4. test running with a wrong apparmor profile name.
+# test that creating a container fails when a wrong apparmor profile name is specified.
+@test "run a container with wrong apparmor profile name" {
+ # this test requires apparmor, so skip this test if apparmor is not enabled.
+ enabled=$(is_apparmor_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since apparmor is not enabled."
+ fi
+
+ start_crio
+
+ sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname4": "not-exists"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor4.json
+
+ run crioctl pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "Creating container failed" ]]
+
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 5. test running with the default apparmor profile unloaded.
+# test that a container can still run after the fake crio default apparmor profile has been unloaded.
+@test "run a container after unloading default apparmor profile" {
+ # this test requires apparmor, so skip this test if apparmor is not enabled.
+ enabled=$(is_apparmor_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since apparmor is not enabled."
+ fi
+
+ start_crio
+ remove_apparmor_profile "$FAKE_CRIO_DEFAULT_PROFILE_PATH"
+
+ sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname5": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor5.json
+
+ run crioctl pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" touch test.txt
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/bin2img/bin2img.go b/test/bin2img/bin2img.go
new file mode 100644
index 000000000..b75289458
--- /dev/null
+++ b/test/bin2img/bin2img.go
@@ -0,0 +1,226 @@
+package main
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "io"
+ "os"
+ "runtime"
+
+ "github.com/containers/image/storage"
+ "github.com/containers/image/types"
+ sstorage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/reexec"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+func main() {
+ if reexec.Init() {
+ return
+ }
+
+ app := cli.NewApp()
+ app.Name = "bin2img"
+ app.Usage = "barebones image builder"
+ app.Version = "0.0.1"
+
+ app.Flags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "debug",
+ Usage: "turn on debug logging",
+ },
+ cli.StringFlag{
+ Name: "root",
+ Usage: "graph root directory",
+ },
+ cli.StringFlag{
+ Name: "runroot",
+ Usage: "run root directory",
+ },
+ cli.StringFlag{
+ Name: "storage-driver",
+ Usage: "storage driver",
+ },
+ cli.StringSliceFlag{
+ Name: "storage-opt",
+ Usage: "storage option",
+ },
+ cli.StringFlag{
+ Name: "image-name",
+ Usage: "set image name",
+ Value: "kubernetes/pause",
+ },
+ cli.StringFlag{
+ Name: "source-binary",
+ Usage: "source binary",
+ Value: "../../pause/pause",
+ },
+ cli.StringFlag{
+ Name: "image-binary",
+ Usage: "image binary",
+ Value: "/pause",
+ },
+ }
+
+ app.Action = func(c *cli.Context) error {
+ debug := c.GlobalBool("debug")
+ rootDir := c.GlobalString("root")
+ runrootDir := c.GlobalString("runroot")
+ storageDriver := c.GlobalString("storage-driver")
+ storageOptions := c.GlobalStringSlice("storage-opt")
+ imageName := c.GlobalString("image-name")
+ sourceBinary := c.GlobalString("source-binary")
+ imageBinary := c.GlobalString("image-binary")
+
+ if debug {
+ logrus.SetLevel(logrus.DebugLevel)
+ } else {
+ logrus.SetLevel(logrus.ErrorLevel)
+ }
+ if rootDir == "" && runrootDir != "" {
+ logrus.Errorf("must set --root and --runroot, or neither")
+ os.Exit(1)
+ }
+ if rootDir != "" && runrootDir == "" {
+ logrus.Errorf("must set --root and --runroot, or neither")
+ os.Exit(1)
+ }
+ storeOptions := sstorage.DefaultStoreOptions
+ if rootDir != "" && runrootDir != "" {
+ storeOptions.GraphDriverName = storageDriver
+ storeOptions.GraphDriverOptions = storageOptions
+ storeOptions.GraphRoot = rootDir
+ storeOptions.RunRoot = runrootDir
+ }
+ store, err := sstorage.GetStore(storeOptions)
+ if err != nil {
+ logrus.Errorf("error opening storage: %v", err)
+ os.Exit(1)
+ }
+ defer func() {
+ _, _ = store.Shutdown(false)
+ }()
+
+ layerBuffer := &bytes.Buffer{}
+ binary, err := os.Open(sourceBinary)
+ if err != nil {
+ logrus.Errorf("error opening image binary: %v", err)
+ os.Exit(1)
+ }
+ binInfo, err := binary.Stat()
+ if err != nil {
+ logrus.Errorf("error statting image binary: %v", err)
+ os.Exit(1)
+ }
+ archive := tar.NewWriter(layerBuffer)
+ err = archive.WriteHeader(&tar.Header{
+ Name: imageBinary,
+ Size: binInfo.Size(),
+ Mode: 0555,
+ ModTime: binInfo.ModTime(),
+ Typeflag: tar.TypeReg,
+ Uname: "root",
+ Gname: "root",
+ })
+ if err != nil {
+ logrus.Errorf("error writing archive header: %v", err)
+ os.Exit(1)
+ }
+ _, err = io.Copy(archive, binary)
+ if err != nil {
+ logrus.Errorf("error archiving image binary: %v", err)
+ os.Exit(1)
+ }
+ archive.Close()
+ binary.Close()
+ layerInfo := types.BlobInfo{
+ Digest: digest.Canonical.FromBytes(layerBuffer.Bytes()),
+ Size: int64(layerBuffer.Len()),
+ }
+
+ ref, err := storage.Transport.ParseStoreReference(store, imageName)
+ if err != nil {
+ logrus.Errorf("error parsing image name: %v", err)
+ os.Exit(1)
+ }
+ img, err := ref.NewImageDestination(nil)
+ if err != nil {
+ logrus.Errorf("error preparing to write image: %v", err)
+ os.Exit(1)
+ }
+ defer img.Close()
+ layer, err := img.PutBlob(layerBuffer, layerInfo)
+ if err != nil {
+ logrus.Errorf("error preparing to write image: %v", err)
+ os.Exit(1)
+ }
+ config := &v1.Image{
+ Architecture: runtime.GOARCH,
+ OS: runtime.GOOS,
+ Config: v1.ImageConfig{
+ User: "root",
+ Entrypoint: []string{imageBinary},
+ },
+ RootFS: v1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{
+ layer.Digest,
+ },
+ },
+ }
+ cbytes, err := json.Marshal(config)
+ if err != nil {
+ logrus.Errorf("error encoding configuration: %v", err)
+ os.Exit(1)
+ }
+ configInfo := types.BlobInfo{
+ Digest: digest.Canonical.FromBytes(cbytes),
+ Size: int64(len(cbytes)),
+ }
+ configInfo, err = img.PutBlob(bytes.NewBuffer(cbytes), configInfo)
+ if err != nil {
+ logrus.Errorf("error saving configuration: %v", err)
+ os.Exit(1)
+ }
+ manifest := &v1.Manifest{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ Config: v1.Descriptor{
+ MediaType: v1.MediaTypeImageConfig,
+ Digest: configInfo.Digest,
+ Size: int64(len(cbytes)),
+ },
+ Layers: []v1.Descriptor{{
+ MediaType: v1.MediaTypeImageLayer,
+ Digest: layer.Digest,
+ Size: layer.Size,
+ }},
+ }
+ mbytes, err := json.Marshal(manifest)
+ if err != nil {
+ logrus.Errorf("error encoding manifest: %v", err)
+ os.Exit(1)
+ }
+ err = img.PutManifest(mbytes)
+ if err != nil {
+ logrus.Errorf("error saving manifest: %v", err)
+ os.Exit(1)
+ }
+ err = img.Commit()
+ if err != nil {
+ logrus.Errorf("error committing image: %v", err)
+ os.Exit(1)
+ }
+ return nil
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ logrus.Fatal(err)
+ }
+}
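bin2img is used by the test harness to build the local pause image into the per-test storage root before crio starts. The invocation in helpers.bash (further down in this patch) is essentially:

```sh
# Build the pause binary into the storage root used by this test run.
"$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS \
	--runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY"
```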
diff --git a/test/cgroups.bats b/test/cgroups.bats
new file mode 100644
index 000000000..cbe27be41
--- /dev/null
+++ b/test/cgroups.bats
@@ -0,0 +1,40 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "pids limit" {
+ if ! grep pids /proc/self/cgroup; then
+ skip "pids cgroup controller is not mounted"
+ fi
+ PIDS_LIMIT=1234 start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ pids_limit_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin); obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)')
+ echo "$pids_limit_config" > "$TESTDIR"/container_pids_limit.json
+ run crioctl ctr create --config "$TESTDIR"/container_pids_limit.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" cat /sys/fs/cgroup/pids/pids.max
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "1234" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/checkseccomp/checkseccomp.go b/test/checkseccomp/checkseccomp.go
new file mode 100644
index 000000000..ec7ee1020
--- /dev/null
+++ b/test/checkseccomp/checkseccomp.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func main() {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ os.Exit(0)
+ }
+ }
+ os.Exit(1)
+}
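checkseccomp probes the kernel with prctl(2) and exits 0 only when both CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are available. The seccomp tests (seccomp.bats, listed in the diffstat but not reproduced here) can use it as a skip gate; a hypothetical sketch of that usage, assuming the `CHECKSECCOMP_BINARY` variable defined in helpers.bash:

```sh
# Hypothetical gate: skip seccomp tests on kernels without seccomp filter support.
if ! "$CHECKSECCOMP_BINARY"; then
	skip "seccomp is not enabled on this kernel"
fi
```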
diff --git a/test/command.bats b/test/command.bats
new file mode 100644
index 000000000..86e58f9db
--- /dev/null
+++ b/test/command.bats
@@ -0,0 +1,12 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "crio commands" {
+ run ${CRIO_BINARY} --config /dev/null config > /dev/null
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${CRIO_BINARY} badoption > /dev/null
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
diff --git a/test/copyimg/copyimg.go b/test/copyimg/copyimg.go
new file mode 100644
index 000000000..f83f92766
--- /dev/null
+++ b/test/copyimg/copyimg.go
@@ -0,0 +1,202 @@
+package main
+
+import (
+ "os"
+
+ "github.com/containers/image/copy"
+ "github.com/containers/image/signature"
+ "github.com/containers/image/storage"
+ "github.com/containers/image/transports/alltransports"
+ "github.com/containers/image/types"
+ sstorage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+func main() {
+ if reexec.Init() {
+ return
+ }
+
+ app := cli.NewApp()
+ app.Name = "copyimg"
+ app.Usage = "barebones image copier"
+ app.Version = "0.0.1"
+
+ app.Flags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "debug",
+ Usage: "turn on debug logging",
+ },
+ cli.StringFlag{
+ Name: "root",
+ Usage: "graph root directory",
+ },
+ cli.StringFlag{
+ Name: "runroot",
+ Usage: "run root directory",
+ },
+ cli.StringFlag{
+ Name: "storage-driver",
+ Usage: "storage driver",
+ },
+ cli.StringSliceFlag{
+ Name: "storage-opt",
+ Usage: "storage option",
+ },
+ cli.StringFlag{
+ Name: "signature-policy",
+ Usage: "signature policy",
+ },
+ cli.StringFlag{
+ Name: "image-name",
+ Usage: "set image name",
+ },
+ cli.StringFlag{
+ Name: "add-name",
+ Usage: "name to add to image",
+ },
+ cli.StringFlag{
+ Name: "import-from",
+ Usage: "import source",
+ },
+ cli.StringFlag{
+ Name: "export-to",
+ Usage: "export target",
+ },
+ }
+
+ app.Action = func(c *cli.Context) error {
+ var store sstorage.Store
+ var ref, importRef, exportRef types.ImageReference
+ var err error
+
+ debug := c.GlobalBool("debug")
+ rootDir := c.GlobalString("root")
+ runrootDir := c.GlobalString("runroot")
+ storageDriver := c.GlobalString("storage-driver")
+ storageOptions := c.GlobalStringSlice("storage-opt")
+ signaturePolicy := c.GlobalString("signature-policy")
+ imageName := c.GlobalString("image-name")
+ addName := c.GlobalString("add-name")
+ importFrom := c.GlobalString("import-from")
+ exportTo := c.GlobalString("export-to")
+
+ if debug {
+ logrus.SetLevel(logrus.DebugLevel)
+ } else {
+ logrus.SetLevel(logrus.ErrorLevel)
+ }
+
+ if imageName != "" {
+ if rootDir == "" && runrootDir != "" {
+ logrus.Errorf("must set --root and --runroot, or neither")
+ os.Exit(1)
+ }
+ if rootDir != "" && runrootDir == "" {
+ logrus.Errorf("must set --root and --runroot, or neither")
+ os.Exit(1)
+ }
+ storeOptions := sstorage.DefaultStoreOptions
+ if rootDir != "" && runrootDir != "" {
+ storeOptions.GraphDriverName = storageDriver
+ storeOptions.GraphDriverOptions = storageOptions
+ storeOptions.GraphRoot = rootDir
+ storeOptions.RunRoot = runrootDir
+ }
+ store, err = sstorage.GetStore(storeOptions)
+ if err != nil {
+ logrus.Errorf("error opening storage: %v", err)
+ os.Exit(1)
+ }
+ defer func() {
+ _, _ = store.Shutdown(false)
+ }()
+
+ storage.Transport.SetStore(store)
+ ref, err = storage.Transport.ParseStoreReference(store, imageName)
+ if err != nil {
+ logrus.Errorf("error parsing image name: %v", err)
+ os.Exit(1)
+ }
+ }
+
+ systemContext := types.SystemContext{
+ SignaturePolicyPath: signaturePolicy,
+ }
+ policy, err := signature.DefaultPolicy(&systemContext)
+ if err != nil {
+ logrus.Errorf("error loading signature policy: %v", err)
+ os.Exit(1)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ logrus.Errorf("error loading signature policy: %v", err)
+ os.Exit(1)
+ }
+ defer func() {
+ _ = policyContext.Destroy()
+ }()
+ options := &copy.Options{}
+
+ if importFrom != "" {
+ importRef, err = alltransports.ParseImageName(importFrom)
+ if err != nil {
+ logrus.Errorf("error parsing image name %v: %v", importFrom, err)
+ os.Exit(1)
+ }
+ }
+
+ if exportTo != "" {
+ exportRef, err = alltransports.ParseImageName(exportTo)
+ if err != nil {
+ logrus.Errorf("error parsing image name %v: %v", exportTo, err)
+ os.Exit(1)
+ }
+ }
+
+ if imageName != "" {
+ if importFrom != "" {
+ err = copy.Image(policyContext, ref, importRef, options)
+ if err != nil {
+ logrus.Errorf("error importing %s: %v", importFrom, err)
+ os.Exit(1)
+ }
+ }
+ if addName != "" {
+ destImage, err1 := storage.Transport.GetStoreImage(store, ref)
+ if err1 != nil {
+ logrus.Errorf("error finding image: %v", err1)
+ os.Exit(1)
+ }
+ names := append(destImage.Names, imageName, addName)
+ err = store.SetNames(destImage.ID, names)
+ if err != nil {
+ logrus.Errorf("error adding name to %s: %v", imageName, err)
+ os.Exit(1)
+ }
+ }
+ if exportTo != "" {
+ err = copy.Image(policyContext, exportRef, ref, options)
+ if err != nil {
+ logrus.Errorf("error exporting %s: %v", exportTo, err)
+ os.Exit(1)
+ }
+ }
+ } else {
+ if importFrom != "" && exportTo != "" {
+ err = copy.Image(policyContext, exportRef, importRef, options)
+ if err != nil {
+ logrus.Errorf("error copying %s to %s: %v", importFrom, exportTo, err)
+ os.Exit(1)
+ }
+ }
+ }
+ return nil
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ logrus.Fatal(err)
+ }
+}
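copyimg is used by the helpers in two ways: once to cache images from a registry into a `dir:` artifact, and again to preload that artifact into the per-test storage root before crio starts. The corresponding calls in helpers.bash (further down in this patch) look like:

```sh
# Cache redis:alpine from the registry into a reusable dir: artifact.
"$COPYIMG_BINARY" --import-from=docker://redis:alpine \
	--export-to=dir:"$ARTIFACTS_PATH"/redis-image \
	--signature-policy="$INTEGRATION_ROOT"/policy.json

# Preload the cached artifact into the per-test storage root under a full name.
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" \
	--image-name=docker.io/library/redis:alpine \
	--import-from=dir:"$ARTIFACTS_PATH"/redis-image \
	--signature-policy="$INTEGRATION_ROOT"/policy.json
```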
diff --git a/test/ctr.bats b/test/ctr.bats
new file mode 100644
index 000000000..90f42b687
--- /dev/null
+++ b/test/ctr.bats
@@ -0,0 +1,873 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "ctr not found correct error message" {
+ start_crio
+ run crioctl ctr status --id randomid
+ echo "$output"
+ [ "$status" -eq 1 ]
+ [[ "$output" =~ "container with ID starting with randomid not found" ]]
+
+ stop_crio
+}
+
+@test "ctr termination reason Completed" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Reason: Completed" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr termination reason Error" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ errorconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["false"]; json.dump(obj, sys.stdout)')
+ echo "$errorconfig" > "$TESTDIR"/container_config_error.json
+ run crioctl ctr create --config "$TESTDIR"/container_config_error.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Reason: Error" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr remove" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr lifecycle" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr logging" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Create a new container.
+ newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json)
+ cp "$TESTDATA"/container_config_logging.json "$newconfig"
+ sed -i 's|"%shellcommand%"|"echo here is some output \&\& echo and some from stderr >\&2"|' "$newconfig"
+ run crioctl ctr create --config "$newconfig" --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ # Ignore errors on stop.
+ run crioctl ctr status --id "$ctr_id"
+ [ "$status" -eq 0 ]
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Check that the output is what we expect.
+ logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log"
+ [ -f "$logpath" ]
+ echo "$logpath :: $(cat "$logpath")"
+ grep -E "^[^\n]+ stdout here is some output$" "$logpath"
+ grep -E "^[^\n]+ stderr and some from stderr$" "$logpath"
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr logging [tty=true]" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Create a new container.
+ newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json)
+ cp "$TESTDATA"/container_config_logging.json "$newconfig"
+ sed -i 's|"%shellcommand%"|"echo here is some output"|' "$newconfig"
+ sed -i 's|"tty": false,|"tty": true,|' "$newconfig"
+ run crioctl ctr create --config "$newconfig" --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ # Ignore errors on stop.
+ run crioctl ctr status --id "$ctr_id"
+ [ "$status" -eq 0 ]
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Check that the output is what we expect.
+ logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log"
+ [ -f "$logpath" ]
+ echo "$logpath :: $(cat "$logpath")"
+ grep --binary -P "^[^\n]+ stdout here is some output\x0d$" "$logpath"
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr log max" {
+ LOG_SIZE_MAX_LIMIT=10000 start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Create a new container.
+ newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json)
+ cp "$TESTDATA"/container_config_logging.json "$newconfig"
+ sed -i 's|"%shellcommand%"|"for i in $(seq 250); do echo $i; done"|' "$newconfig"
+ run crioctl ctr create --config "$newconfig" --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ sleep 6
+ run crioctl ctr status --id "$ctr_id"
+ [ "$status" -eq 0 ]
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ # Check that the output is what we expect.
+ logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log"
+ [ -f "$logpath" ]
+ echo "$logpath :: $(cat "$logpath")"
+ len=$(wc -l "$logpath" | awk '{print $1}')
+ [ $len -lt 250 ]
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# regression test for #127
+@test "ctrs status for a pod" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl ctr list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" != "" ]]
+
+ printf '%s\n' "$output" | while IFS= read -r id
+ do
+ run crioctl ctr status --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr list filtering" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod1
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod1_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr1_id="$output"
+ run crioctl ctr start --id "$ctr1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod2
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod2_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr2_id="$output"
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod3
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod3_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr3_id="$output"
+ run crioctl ctr start --id "$ctr3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr list --id "$ctr1_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ run crioctl ctr list --id "${ctr1_id:0:4}" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ run crioctl ctr list --id "$ctr2_id" --pod "$pod2_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr2_id" ]]
+ run crioctl ctr list --id "$ctr2_id" --pod "$pod3_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == "" ]]
+ run crioctl ctr list --state created --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr2_id" ]]
+ run crioctl ctr list --state running --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ run crioctl ctr list --state stopped --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr3_id" ]]
+ run crioctl ctr list --pod "$pod1_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ run crioctl ctr list --pod "$pod2_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr2_id" ]]
+ run crioctl ctr list --pod "$pod3_id" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr3_id" ]]
+ run crioctl pod stop --id "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr list label filtering" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr1 --label "a=b" --label "c=d" --label "e=f"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr1_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr2 --label "a=b" --label "c=d"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr2_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr3 --label "a=b"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr3_id="$output"
+ run crioctl ctr list --label "tier=backend" --label "a=b" --label "c=d" --label "e=f" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ run crioctl ctr list --label "tier=frontend" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == "" ]]
+ run crioctl ctr list --label "a=b" --label "c=d" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ [[ "$output" =~ "$ctr2_id" ]]
+ run crioctl ctr list --label "a=b" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$ctr1_id" ]]
+ [[ "$output" =~ "$ctr2_id" ]]
+ [[ "$output" =~ "$ctr3_id" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr metadata in list & status" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr list --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # TODO: expected value should not be hard-coded here
+ [[ "$output" =~ "Name: container1" ]]
+ [[ "$output" =~ "Attempt: 1" ]]
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # TODO: expected value should not be hard-coded here
+ [[ "$output" =~ "Name: container1" ]]
+ [[ "$output" =~ "Attempt: 1" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr execsync conflicting with conmon flags parsing" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" sh -c "echo hello world"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "hello world" ]]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr execsync" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" echo HELLO
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "HELLO" ]]
+ run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10
+ echo "$output"
+ [[ "$output" =~ "command timed out" ]]
+ [ "$status" -ne 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr device add" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis_device.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" ls /dev/mynull
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "/dev/mynull" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr execsync failure" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" doesnotexist
+ echo "$output"
+ [ "$status" -ne 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr execsync exit code" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" false
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr execsync std{out,err}" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" echo hello0 stdout
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == *"$(printf "Stdout:\nhello0 stdout")"* ]]
+
+ stderrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "runcom/stderr-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)')
+ echo "$stderrconfig" > "$TESTDIR"/container_config_stderr.json
+ run crioctl ctr create --config "$TESTDIR"/container_config_stderr.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" stderr
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == *"$(printf "Stderr:\nthis goes to stderr")"* ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr stop idempotent" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr caps drop" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ capsconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["capabilities"] = {u"add_capabilities": [], u"drop_capabilities": [u"mknod", u"kill", u"sys_chroot", u"setuid", u"setgid"]}; json.dump(obj, sys.stdout)')
+ echo "$capsconfig" > "$TESTDIR"/container_config_caps.json
+ run crioctl ctr create --config "$TESTDIR"/container_config_caps.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "run ctr with image with Config.Volumes" {
+ start_crio
+ run crioctl image pull gcr.io/k8s-testimages/redis:e2e
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ volumesconfig=$(cat "$TESTDATA"/container_redis.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "gcr.io/k8s-testimages/redis:e2e"; obj["args"] = []; json.dump(obj, sys.stdout)')
+ echo "$volumesconfig" > "$TESTDIR"/container_config_volumes.json
+ run crioctl ctr create --config "$TESTDIR"/container_config_volumes.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr oom" {
+ if [[ "$TRAVIS" == "true" ]]; then
+ skip "travis container tests don't support testing OOM"
+ fi
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ oomconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/oom"; obj["linux"]["resources"]["memory_limit_in_bytes"] = 5120000; obj["command"] = ["/oom"]; json.dump(obj, sys.stdout)')
+ echo "$oomconfig" > "$TESTDIR"/container_config_oom.json
+ run crioctl ctr create --config "$TESTDIR"/container_config_oom.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # Wait for container to OOM
+ attempt=0
+ while [ $attempt -le 100 ]; do
+ attempt=$((attempt+1))
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ if [[ "$output" =~ "OOMKilled" ]]; then
+ break
+ fi
+ sleep 10
+ done
+ [[ "$output" =~ "OOMKilled" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr /etc/resolv.conf rw/ro mode" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config_resolvconf.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Status: CONTAINER_EXITED" ]]
+ [[ "$output" =~ "Exit Code: 0" ]]
+ [[ "$output" =~ "Reason: Completed" ]]
+
+ run crioctl ctr create --name roctr --config "$TESTDATA"/container_config_resolvconf_ro.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Status: CONTAINER_EXITED" ]]
+ [[ "$output" =~ "Exit Code: 1" ]]
+ [[ "$output" =~ "Reason: Error" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr create with non-existent command" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; json.dump(obj, sys.stdout)')
+ echo "$newconfig" > "$TESTDIR"/container_nonexistent.json
+ run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "executable file not found" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr create with non-existent command [tty]" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; obj["tty"] = True; json.dump(obj, sys.stdout)')
+ echo "$newconfig" > "$TESTDIR"/container_nonexistent.json
+ run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "executable file not found" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/default_mounts.bats b/test/default_mounts.bats
new file mode 100644
index 000000000..8e727085e
--- /dev/null
+++ b/test/default_mounts.bats
@@ -0,0 +1,69 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "bind secrets mounts to container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" cat /proc/mounts
+ echo "$output"
+ [ "$status" -eq 0 ]
+ mount_info="$output"
+ run grep /container/path1 <<< "$mount_info"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "default mounts correctly sorted with other mounts" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ host_path="$TESTDIR"/clash
+ mkdir "$host_path"
+ echo "clashing..." > "$host_path"/clashing.txt
+ sed -e "s,%HPATH%,$host_path,g" "$TESTDATA"/container_redis_default_mounts.json > "$TESTDIR"/defmounts_pre.json
+ sed -e 's,%CPATH%,\/container\/path1\/clash,g' "$TESTDIR"/defmounts_pre.json > "$TESTDIR"/defmounts.json
+ run crioctl ctr create --config "$TESTDIR"/defmounts.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr execsync --id "$ctr_id" ls -la /container/path1/clash
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" cat /container/path1/clash/clashing.txt
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "clashing..." ]]
+ run crioctl ctr execsync --id "$ctr_id" ls -la /container/path1
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" cat /container/path1/test.txt
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Testing secrets mounts!" ]]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/helpers.bash b/test/helpers.bash
new file mode 100644
index 000000000..22955d333
--- /dev/null
+++ b/test/helpers.bash
@@ -0,0 +1,493 @@
+#!/bin/bash
+
+# Root directory of integration tests.
+INTEGRATION_ROOT=$(dirname "$(readlink -f "$BASH_SOURCE")")
+
+# Test data path.
+TESTDATA="${INTEGRATION_ROOT}/testdata"
+
+# Root directory of the repository.
+CRIO_ROOT=${CRIO_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)}
+
+# Path of the crio binary.
+CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/bin/crio}
+# Path of the crictl binary.
+CRICTL_PATH=$(command -v crictl || true)
+CRICTL_BINARY=${CRICTL_PATH:-/usr/bin/crictl}
+# Path to kpod binary.
+KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/bin/kpod}
+# Path of the conmon binary.
+CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/bin/conmon}
+# Path of the pause binary.
+PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/bin/pause}
+# Path of the default seccomp profile.
+SECCOMP_PROFILE=${SECCOMP_PROFILE:-${CRIO_ROOT}/cri-o/seccomp.json}
+# Name of the default apparmor profile.
+APPARMOR_PROFILE=${APPARMOR_PROFILE:-crio-default}
+# Runtime
+RUNTIME=${RUNTIME:-runc}
+RUNTIME_PATH=$(command -v $RUNTIME || true)
+RUNTIME_BINARY=${RUNTIME_PATH:-/usr/local/sbin/runc}
+# Path of the apparmor_parser binary.
+APPARMOR_PARSER_BINARY=${APPARMOR_PARSER_BINARY:-/sbin/apparmor_parser}
+# Path of the apparmor profile for test.
+APPARMOR_TEST_PROFILE_PATH=${APPARMOR_TEST_PROFILE_PATH:-${TESTDATA}/apparmor_test_deny_write}
+# Path of the apparmor profile for unloading crio-default.
+FAKE_CRIO_DEFAULT_PROFILE_PATH=${FAKE_CRIO_DEFAULT_PROFILE_PATH:-${TESTDATA}/fake_crio_default}
+# Name of the apparmor profile for test.
+APPARMOR_TEST_PROFILE_NAME=${APPARMOR_TEST_PROFILE_NAME:-apparmor-test-deny-write}
+# Path of boot config.
+BOOT_CONFIG_FILE_PATH=${BOOT_CONFIG_FILE_PATH:-/boot/config-`uname -r`}
+# Path of apparmor parameters file.
+APPARMOR_PARAMETERS_FILE_PATH=${APPARMOR_PARAMETERS_FILE_PATH:-/sys/module/apparmor/parameters/enabled}
+# Path of the bin2img binary.
+BIN2IMG_BINARY=${BIN2IMG_BINARY:-${CRIO_ROOT}/cri-o/test/bin2img/bin2img}
+# Path of the copyimg binary.
+COPYIMG_BINARY=${COPYIMG_BINARY:-${CRIO_ROOT}/cri-o/test/copyimg/copyimg}
+# Path of tests artifacts.
+ARTIFACTS_PATH=${ARTIFACTS_PATH:-${CRIO_ROOT}/cri-o/.artifacts}
+# Path of the checkseccomp binary.
+CHECKSECCOMP_BINARY=${CHECKSECCOMP_BINARY:-${CRIO_ROOT}/cri-o/test/checkseccomp/checkseccomp}
+# XXX: This is hardcoded inside cri-o at the moment.
+DEFAULT_LOG_PATH=/var/log/crio/pods
+# Cgroup manager to be used
+CGROUP_MANAGER=${CGROUP_MANAGER:-cgroupfs}
+# Image volumes handling
+IMAGE_VOLUMES=${IMAGE_VOLUMES:-mkdir}
+# Container pids limit
+PIDS_LIMIT=${PIDS_LIMIT:-1024}
+# Log size max limit
+LOG_SIZE_MAX_LIMIT=${LOG_SIZE_MAX_LIMIT:--1}
+
+TESTDIR=$(mktemp -d)
+
+# kpod pull needs a configuration file for shortname pulls
+export REGISTRIES_CONFIG_PATH="$INTEGRATION_ROOT/registries.conf"
+
+# Setup default hooks dir
+HOOKSDIR=$TESTDIR/hooks
+mkdir ${HOOKSDIR}
+HOOKS_OPTS="--hooks-dir-path=$HOOKSDIR"
+
+# Setup default secrets mounts
+MOUNT_PATH="$TESTDIR/secrets"
+mkdir ${MOUNT_PATH}
+MOUNT_FILE="${MOUNT_PATH}/test.txt"
+touch ${MOUNT_FILE}
+echo "Testing secrets mounts!" > ${MOUNT_FILE}
+
+DEFAULT_MOUNTS_OPTS="--default-mounts=${MOUNT_PATH}:/container/path1"
+
+# We may need to set some default storage options.
+case "$(stat -f -c %T ${TESTDIR})" in
+ aufs)
+ # None of device mapper, overlay, or aufs can be used dependably over aufs, and of course btrfs and zfs can't,
+ # and we have to explicitly specify the "vfs" driver in order to use it, so do that now.
+ STORAGE_OPTIONS=${STORAGE_OPTIONS:---storage-driver vfs}
+ ;;
+esac
+
+if [ -e /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
+ . /etc/selinux/config
+ filelabel=$(awk -F'"' '/^file.*=.*/ {print $2}' /etc/selinux/${SELINUXTYPE}/contexts/lxc_contexts)
+ chcon -R ${filelabel} $TESTDIR
+fi
+CRIO_SOCKET="$TESTDIR/crio.sock"
+CRIO_CONFIG="$TESTDIR/crio.conf"
+CRIO_CNI_CONFIG="$TESTDIR/cni/net.d/"
+CRIO_CNI_PLUGIN=${CRIO_CNI_PLUGIN:-/opt/cni/bin/}
+POD_CIDR="10.88.0.0/16"
+POD_CIDR_MASK="10.88.*.*"
+
+KPOD_OPTIONS="--root $TESTDIR/crio $STORAGE_OPTIONS --runroot $TESTDIR/crio-run --runtime ${RUNTIME_BINARY}"
+
+cp "$CONMON_BINARY" "$TESTDIR/conmon"
+
+PATH=$PATH:$TESTDIR
+
+# Make sure we have a copy of the redis:alpine image.
+if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then
+ mkdir -p "$ARTIFACTS_PATH"/redis-image
+ if ! "$COPYIMG_BINARY" --import-from=docker://redis:alpine --export-to=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+ echo "Error pulling docker://redis"
+ rm -fr "$ARTIFACTS_PATH"/redis-image
+ exit 1
+ fi
+fi
+
+# TODO: remove the code below for pulling redis:alpine using a canonical reference once
+# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete and we can
+# pull the image using a tagged reference and then subsequently find the image without
+# having to explicitly record the canonical reference as one of the image's names
+if ! [ -d "$ARTIFACTS_PATH"/redis-image-digest ]; then
+ mkdir -p "$ARTIFACTS_PATH"/redis-image-digest
+ if ! "$COPYIMG_BINARY" --import-from=docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --export-to=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+ echo "Error pulling docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b"
+ rm -fr "$ARTIFACTS_PATH"/redis-image-digest
+ exit 1
+ fi
+fi
+
+# Make sure we have a copy of the runcom/stderr-test image.
+if ! [ -d "$ARTIFACTS_PATH"/stderr-test ]; then
+ mkdir -p "$ARTIFACTS_PATH"/stderr-test
+ if ! "$COPYIMG_BINARY" --import-from=docker://runcom/stderr-test:latest --export-to=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+    echo "Error pulling docker://runcom/stderr-test:latest"
+ rm -fr "$ARTIFACTS_PATH"/stderr-test
+ exit 1
+ fi
+fi
+
+# Make sure we have a copy of the busybox:latest image.
+if ! [ -d "$ARTIFACTS_PATH"/busybox-image ]; then
+ mkdir -p "$ARTIFACTS_PATH"/busybox-image
+ if ! "$COPYIMG_BINARY" --import-from=docker://busybox --export-to=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+ echo "Error pulling docker://busybox"
+ rm -fr "$ARTIFACTS_PATH"/busybox-image
+ exit 1
+ fi
+fi
+
+# Make sure we have a copy of the mrunalp/oom:latest image.
+if ! [ -d "$ARTIFACTS_PATH"/oom-image ]; then
+ mkdir -p "$ARTIFACTS_PATH"/oom-image
+ if ! "$COPYIMG_BINARY" --import-from=docker://mrunalp/oom --export-to=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+ echo "Error pulling docker://mrunalp/oom"
+ rm -fr "$ARTIFACTS_PATH"/oom-image
+ exit 1
+ fi
+fi
+
+# Make sure we have a copy of the mrunalp/image-volume-test:latest image.
+if ! [ -d "$ARTIFACTS_PATH"/image-volume-test-image ]; then
+ mkdir -p "$ARTIFACTS_PATH"/image-volume-test-image
+ if ! "$COPYIMG_BINARY" --import-from=docker://mrunalp/image-volume-test --export-to=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
+    echo "Error pulling docker://mrunalp/image-volume-test"
+ rm -fr "$ARTIFACTS_PATH"/image-volume-test-image
+ exit 1
+ fi
+fi
+
+# Run crio using the binary specified by $CRIO_BINARY.
+# This must ONLY be used with CRI-O instances set up by `start_crio`.
+function crio() {
+ "$CRIO_BINARY" --listen "$CRIO_SOCKET" "$@"
+}
+
+# DEPRECATED
+OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/bin/crioctl}
+# Run crioctl using the binary specified by $OCIC_BINARY.
+function crioctl() {
+ "$OCIC_BINARY" --connect "$CRIO_SOCKET" "$@"
+}
+
+# Run crictl using the binary specified by $CRICTL_BINARY.
+function crictl() {
+ "$CRICTL_BINARY" -r "$CRIO_SOCKET" -i "$CRIO_SOCKET" "$@"
+}
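+# These wrappers are normally invoked through bats' `run`, which captures
+# $status and $output for the assertions used throughout the suite, e.g.:
+#   run crictl inspecti busybox
+#   [ "$status" -eq 0 ]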
+
+# Communicate with the Docker daemon on the host machine.
+# This should rarely be needed.
+function docker_host() {
+ command docker "$@"
+}
+
+# Retry a command $1 times until it succeeds. Wait $2 seconds between retries.
+function retry() {
+ local attempts=$1
+ shift
+ local delay=$1
+ shift
+ local i
+
+ for ((i=0; i < attempts; i++)); do
+ run "$@"
+ if [[ "$status" -eq 0 ]] ; then
+ return 0
+ fi
+ sleep $delay
+ done
+
+ echo "Command \"$@\" failed $attempts times. Output: $output"
+ false
+}
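+# Example usage (sketch): retry 3 2 crictl inspecti busybox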
+
+# Wait until crio becomes reachable.
+function wait_until_reachable() {
+ retry 15 1 crictl status
+}
+
+# Start crio.
+function start_crio() {
+ if [[ -n "$1" ]]; then
+ seccomp="$1"
+ else
+ seccomp="$SECCOMP_PROFILE"
+ fi
+
+ if [[ -n "$2" ]]; then
+ apparmor="$2"
+ else
+ apparmor="$APPARMOR_PROFILE"
+ fi
+
+ # Don't forget: bin2img, copyimg, and crio have their own default drivers, so if you override any, you probably need to override them all
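+	# e.g. setting STORAGE_OPTIONS="--storage-driver vfs" (as the aufs case above does)
+	# keeps the bin2img, copyimg, and crio invocations below on the same driver.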
+ if ! [ "$3" = "--no-pause-image" ] ; then
+ "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY"
+ fi
+ "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+# TODO: remove the code below for copying redis:alpine in using a canonical reference once
+# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete and we can
+# copy the image using a tagged reference and then subsequently find the image without
+# having to explicitly record the canonical reference as one of the image's names
+ "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json
+ "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+ "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json
+ "$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
+
+	# Prepare the CNI configuration files; we run with non-host networking by default.
+ if [[ -n "$4" ]]; then
+ netfunc="$4"
+ else
+ netfunc="prepare_network_conf"
+ fi
+ ${netfunc} $POD_CIDR
+
+ "$CRIO_BINARY" --log-level debug --config "$CRIO_CONFIG" & CRIO_PID=$!
+ wait_until_reachable
+
+ run crictl inspecti redis:alpine
+ if [ "$status" -ne 0 ] ; then
+ crictl pull redis:alpine
+ fi
+ REDIS_IMAGEID=$(crictl inspecti redis:alpine | head -1 | sed -e "s/ID: //g")
+ run crictl inspecti redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b
+ if [ "$status" -ne 0 ] ; then
+ crictl pull redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b
+ fi
+ REDIS_IMAGEID_DIGESTED=$(crictl inspecti redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b | head -1 | sed -e "s/ID: //g")
+ run crictl inspecti mrunalp/oom
+ if [ "$status" -ne 0 ] ; then
+ crictl pull mrunalp/oom
+ fi
+ OOM_IMAGEID=$(crictl inspecti mrunalp/oom | head -1 | sed -e "s/ID: //g")
+	run crictl inspecti runcom/stderr-test
+ if [ "$status" -ne 0 ] ; then
+ crictl pull runcom/stderr-test:latest
+ fi
+ STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | head -1 | sed -e "s/ID: //g")
+ run crictl inspecti busybox
+ if [ "$status" -ne 0 ] ; then
+ crictl pull busybox:latest
+ fi
+ BUSYBOX_IMAGEID=$(crictl inspecti busybox | head -1 | sed -e "s/ID: //g")
+ run crictl inspecti mrunalp/image-volume-test
+ if [ "$status" -ne 0 ] ; then
+ crictl pull mrunalp/image-volume-test:latest
+ fi
+ VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g")
+}
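+
+# Typical test skeleton built on the helpers above (sketch; see hooks.bats):
+#   start_crio
+#   run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+#   ...
+#   cleanup_ctrs
+#   cleanup_pods
+#   stop_crio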
+
+function cleanup_ctrs() {
+ run crictl ps --quiet
+ if [ "$status" -eq 0 ]; then
+ if [ "$output" != "" ]; then
+ printf '%s\n' "$output" | while IFS= read -r line
+ do
+ crictl stop "$line"
+ crictl rm "$line"
+ done
+ fi
+ fi
+ rm -f /run/hookscheck
+}
+
+function cleanup_images() {
+ run crictl images --quiet
+ if [ "$status" -eq 0 ]; then
+ if [ "$output" != "" ]; then
+ printf '%s\n' "$output" | while IFS= read -r line
+ do
+ crictl rmi "$line"
+ done
+ fi
+ fi
+}
+
+function cleanup_pods() {
+ run crictl sandboxes --quiet
+ if [ "$status" -eq 0 ]; then
+ if [ "$output" != "" ]; then
+ printf '%s\n' "$output" | while IFS= read -r line
+ do
+ crictl stops "$line"
+ crictl rms "$line"
+ done
+ fi
+ fi
+}
+
+# Stop crio.
+function stop_crio() {
+ if [ "$CRIO_PID" != "" ]; then
+ kill "$CRIO_PID" >/dev/null 2>&1
+ wait "$CRIO_PID"
+ rm -f "$CRIO_CONFIG"
+ fi
+
+ cleanup_network_conf
+}
+
+function restart_crio() {
+ if [ "$CRIO_PID" != "" ]; then
+ kill "$CRIO_PID" >/dev/null 2>&1
+ wait "$CRIO_PID"
+ start_crio
+ else
+ echo "you must start crio first"
+ exit 1
+ fi
+}
+
+function cleanup_test() {
+ rm -rf "$TESTDIR"
+}
+
+
+function load_apparmor_profile() {
+ "$APPARMOR_PARSER_BINARY" -r "$1"
+}
+
+function remove_apparmor_profile() {
+ "$APPARMOR_PARSER_BINARY" -R "$1"
+}
+
+function is_seccomp_enabled() {
+ if ! "$CHECKSECCOMP_BINARY" ; then
+ echo 0
+ return
+ fi
+ echo 1
+}
+
+function is_apparmor_enabled() {
+ if [[ -f "$APPARMOR_PARAMETERS_FILE_PATH" ]]; then
+ out=$(cat "$APPARMOR_PARAMETERS_FILE_PATH")
+ if [[ "$out" =~ "Y" ]]; then
+ echo 1
+ return
+ fi
+ fi
+ echo 0
+}
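+
+# Tests presumably gate on these helpers along the lines of (sketch):
+#   enabled=$(is_seccomp_enabled)
+#   if [[ "$enabled" -eq 0 ]]; then
+#       skip "seccomp is not enabled"
+#   fi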
+
+function prepare_network_conf() {
+ mkdir -p $CRIO_CNI_CONFIG
+ cat >$CRIO_CNI_CONFIG/10-crio.conf <<-EOF
+{
+ "cniVersion": "0.2.0",
+ "name": "crionet",
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$1",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ]
+ }
+}
+EOF
+
+ cat >$CRIO_CNI_CONFIG/99-loopback.conf <<-EOF
+{
+ "cniVersion": "0.2.0",
+ "type": "loopback"
+}
+EOF
+
+ echo 0
+}
+
+function prepare_plugin_test_args_network_conf() {
+ mkdir -p $CRIO_CNI_CONFIG
+ cat >$CRIO_CNI_CONFIG/10-plugin-test-args.conf <<-EOF
+{
+ "cniVersion": "0.2.0",
+ "name": "crionet_test_args",
+ "type": "bridge-custom",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$1",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ]
+ }
+}
+EOF
+
+ echo 0
+}
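+
+# A test can swap in this generator through start_crio's fourth argument, e.g.
+# (sketch): start_crio "" "" "" prepare_plugin_test_args_network_conf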
+
+function check_pod_cidr() {
+ run crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ $POD_CIDR_MASK ]]
+}
+
+function parse_pod_ip() {
+ for arg
+ do
+ cidr=`echo "$arg" | grep $POD_CIDR_MASK`
+ if [ "$cidr" == "$arg" ]
+ then
+ echo `echo "$arg" | sed "s/\/[0-9][0-9]//"`
+ fi
+ done
+}
+
+function get_host_ip() {
+ gateway_dev=`ip -o route show default 0.0.0.0/0 | sed 's/.*dev \([^[:space:]]*\).*/\1/'`
+ [ "$gateway_dev" ]
+ host_ip=`ip -o -4 addr show dev $gateway_dev scope global | sed 's/.*inet \([0-9.]*\).*/\1/'`
+}
+
+function ping_pod() {
+ inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
+
+ IFS=" "
+ ip=`parse_pod_ip $inet`
+
+ ping -W 1 -c 5 $ip
+
+ echo $?
+}
+
+function ping_pod_from_pod() {
+ inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
+
+ IFS=" "
+ ip=`parse_pod_ip $inet`
+
+ run crioctl ctr execsync --id $2 ping -W 1 -c 2 $ip
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+
+function cleanup_network_conf() {
+ rm -rf $CRIO_CNI_CONFIG
+
+ echo 0
+}
+
+function temp_sandbox_conf() {
+ sed -e s/\"namespace\":.*/\"namespace\":\ \"$1\",/g "$TESTDATA"/sandbox_config.json > $TESTDIR/sandbox_config_$1.json
+}
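+
+# Example (sketch, hypothetical namespace name):
+#   temp_sandbox_conf testns   # writes $TESTDIR/sandbox_config_testns.json with "namespace": "testns"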
diff --git a/test/hooks.bats b/test/hooks.bats
new file mode 100644
index 000000000..92aa725fc
--- /dev/null
+++ b/test/hooks.bats
@@ -0,0 +1,38 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
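+# Install the test hook at load time so that it is already present in the
+# hooks dir (passed to crio via --hooks-dir-path in helpers.bash) before any
+# test calls start_crio.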
+cp hooks/checkhook.sh ${HOOKSDIR}
+sed "s|HOOKSDIR|${HOOKSDIR}|" hooks/checkhook.json > ${HOOKSDIR}/checkhook.json
+
+@test "pod test hooks" {
+ rm -f /run/hookscheck
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run cat /run/hookscheck
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/hooks/checkhook.json b/test/hooks/checkhook.json
new file mode 100644
index 000000000..50ff23727
--- /dev/null
+++ b/test/hooks/checkhook.json
@@ -0,0 +1,5 @@
+{
+ "cmd" : [".*"],
+ "hook" : "HOOKSDIR/checkhook.sh",
+ "stage" : [ "prestart" ]
+}
diff --git a/test/hooks/checkhook.sh b/test/hooks/checkhook.sh
new file mode 100755
index 000000000..8b755cb40
--- /dev/null
+++ b/test/hooks/checkhook.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
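+# OCI prestart hook used by hooks.bats: append the hook arguments and the
+# container state JSON (delivered on stdin) to /run/hookscheck so the test
+# can verify that the hook ran.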
+echo $@ >> /run/hookscheck
+read line
+echo $line >> /run/hookscheck
diff --git a/test/image.bats b/test/image.bats
new file mode 100644
index 000000000..5458fe134
--- /dev/null
+++ b/test/image.bats
@@ -0,0 +1,250 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE=kubernetes/pause
+SIGNED_IMAGE=registry.access.redhat.com/rhel7-atomic:latest
+UNSIGNED_IMAGE=docker.io/library/hello-world:latest
+
+function teardown() {
+ cleanup_test
+}
+
+@test "run container in pod with image ID" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json
+ run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "container status return image:tag if created by image ID" {
+ start_crio
+
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json
+
+ run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Image: redis:alpine" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "container status return image@digest if created by image ID" {
+ start_crio
+
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ sed -e "s/%VALUE%/$REDIS_IMAGEID_DIGESTED/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json
+
+ run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "ImageRef: redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "image pull and list" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl image list --quiet "$IMAGE"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+ imageid="$output"
+
+ run crioctl image list --quiet @"$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+
+ run crioctl image list --quiet "$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+ cleanup_images
+ stop_crio
+}
+
+@test "image pull with signature" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$SIGNED_IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_images
+ stop_crio
+}
+
+@test "image pull without signature" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$UNSIGNED_IMAGE"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ cleanup_images
+ stop_crio
+}
+
+@test "image pull and list by tag and ID" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$IMAGE:go"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl image list --quiet "$IMAGE:go"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+ imageid="$output"
+
+ run crioctl image list --quiet @"$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+
+ run crioctl image list --quiet "$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+ cleanup_images
+ stop_crio
+}
+
+@test "image pull and list by digest and ID" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl image list --quiet nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+ imageid="$output"
+
+ run crioctl image list --quiet @"$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+
+ run crioctl image list --quiet "$imageid"
+ [ "$status" -eq 0 ]
+ echo "$output"
+ [ "$output" != "" ]
+
+ cleanup_images
+ stop_crio
+}
+
+@test "image list with filter" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl image list --quiet "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ run crioctl image remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ echo "$id"
+ status=1
+ done
+ cleanup_images
+ stop_crio
+}
+
+@test "image list/remove" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ run crioctl image remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" = "" ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ echo "$id"
+ status=1
+ done
+ cleanup_images
+ stop_crio
+}
+
+@test "image status/remove" {
+ start_crio "" "" --no-pause-image
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ run crioctl image status --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ run crioctl image remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" = "" ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ echo "$id"
+ status=1
+ done
+ cleanup_images
+ stop_crio
+}
diff --git a/test/image_remove.bats b/test/image_remove.bats
new file mode 100644
index 000000000..ca2017d00
--- /dev/null
+++ b/test/image_remove.bats
@@ -0,0 +1,74 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE=docker.io/kubernetes/pause
+
+function teardown() {
+ cleanup_test
+}
+
+@test "image remove with multiple names, by name" {
+ start_crio "" "" --no-pause-image
+ # Pull the image, giving it one name.
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # Add a second name to the image.
+ run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # Get the list of image names and IDs.
+ run crioctl image list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ # Cycle through each name, removing it by name. The image that we assigned a second
+ # name to should still be around when we get to removing its second name.
+ grep ^Tag: <<< "$output" | while read -r header tag ; do
+ run crioctl image remove --id "$tag"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+ # List all images and their names. There should be none now.
+ run crioctl image list --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" = "" ]
+ printf '%s\n' "$output" | while IFS= read -r id; do
+ echo "$id"
+ done
+ # All done.
+ cleanup_images
+ stop_crio
+}
+
+@test "image remove with multiple names, by ID" {
+ start_crio "" "" --no-pause-image
+ # Pull the image, giving it one name.
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # Add a second name to the image.
+ run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # Get the image ID of the image we just saved.
+ run crioctl image status --id="$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ # Try to remove the image using its ID. That should succeed because removing by ID always works.
+ grep ^ID: <<< "$output" | while read -r header id ; do
+ run crioctl image remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ done
+ # The image should be gone.
+ run crioctl image status --id="$IMAGE"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ # All done.
+ cleanup_images
+ stop_crio
+}
diff --git a/test/image_volume.bats b/test/image_volume.bats
new file mode 100644
index 000000000..ff05e9cda
--- /dev/null
+++ b/test/image_volume.bats
@@ -0,0 +1,68 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "image volume ignore" {
+ IMAGE_VOLUMES=ignore start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)')
+ echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json
+ run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" ls /imagevolume
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ [[ "$output" =~ "ls: /imagevolume: No such file or directory" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "image volume bind" {
+ IMAGE_VOLUMES=bind start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)')
+ echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json
+ run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" touch /imagevolume/test_file
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 0" ]]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/inspect.bats b/test/inspect.bats
new file mode 100644
index 000000000..c63a688e3
--- /dev/null
+++ b/test/inspect.bats
@@ -0,0 +1,72 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "info inspect" {
+ start_crio
+ out=`echo -e "GET /info HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET`
+ echo "$out"
+ [[ "$out" =~ "\"cgroup_driver\":\"$CGROUP_MANAGER\"" ]]
+ [[ "$out" =~ "\"storage_root\":\"$TESTDIR/crio\"" ]]
+ run crioctl info
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "\"cgroup_driver\": \"$CGROUP_MANAGER\"" ]]
+ [[ "$output" =~ "\"storage_root\": \"$TESTDIR/crio\"" ]]
+
+ stop_crio
+}
+
+@test "ctr inspect" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ out=`echo -e "GET /containers/$ctr_id HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET`
+ echo "$out"
+ [[ "$out" =~ "\"sandbox\":\"$pod_id\"" ]]
+ [[ "$out" =~ "\"image\":\"redis:alpine\"" ]]
+
+ run crioctl ctr inspect --id $ctr_id
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "\"sandbox\": \"$pod_id\"" ]]
+ [[ "$output" =~ "\"image\": \"redis:alpine\"" ]]
+
+ inet=`crioctl ctr execsync --id $ctr_id ip addr show dev eth0 scope global 2>&1 | grep inet`
+
+ IFS=" "
+ ip=`parse_pod_ip $inet`
+ [[ "$out" =~ "\"ip_address\":\"$ip\"" ]]
+ [[ "$out" =~ "\"name\":\"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1\"" ]]
+ [[ "$output" =~ "\"ip_address\": \"$ip\"" ]]
+ [[ "$output" =~ "\"name\": \"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1\"" ]]
+
+
+# TODO: add some other check based on the json below:
+#
+# {"name":"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1","pid":27477,"image":"redis:alpine","created_time":1505223601111546169,"labels":{"batch":"no","type":"small"},"annotations":{"daemon":"crio","owner":"dragon"},"log_path":"/var/log/crio/pods/297d014ba2c54236779da0c2f80dfba45dc31b106e4cd126a1c3c6d78edc2201/81567e9573ea798d6494c9aab156103ee91b72180fd3841a7c24d2ca39886ba2.log","root":"/tmp/tmp.0bkjphWudF/crio/overlay/d7cfc1de83cab9f377a4a1542427d2a019e85a70c1c660a9e6cf9e254df68873/merged","sandbox":"297d014ba2c54236779da0c2f80dfba45dc31b106e4cd126a1c3c6d78edc2201","ip_address":"10.88.9.153"}
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "ctr inspect not found" {
+ start_crio
+ out=`echo -e "GET /containers/notexists HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET`
+ echo "$out"
+ [[ "$out" =~ "can't find the container with id notexists" ]]
+
+ stop_crio
+}
diff --git a/test/kpod_diff.bats b/test/kpod_diff.bats
new file mode 100644
index 000000000..53a94d01e
--- /dev/null
+++ b/test/kpod_diff.bats
@@ -0,0 +1,40 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "test diff of image and parent" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS diff $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "test diff on non-existent layer" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS diff "abc123"
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
+
+@test "test diff with json output" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} diff --format json $IMAGE | python -m json.tool"
+ run ${KPOD_BINARY} $KPOD_OPTIONS diff --format json $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_export.bats b/test/kpod_export.bats
new file mode 100644
index 000000000..9454db399
--- /dev/null
+++ b/test/kpod_export.bats
@@ -0,0 +1,31 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod export output flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+ rm -f container.tar
+}
diff --git a/test/kpod_history.bats b/test/kpod_history.bats
new file mode 100644
index 000000000..aa89cfe65
--- /dev/null
+++ b/test/kpod_history.bats
@@ -0,0 +1,80 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod history default" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} history $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod history with Go template format" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} history --format "{{.ID}} {{.Created}}" $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod history human flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} history --human=false $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod history quiet flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} history -q $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod history no-trunc flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} history --no-trunc $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod history json flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} history --format json $IMAGE | python -m json.tool"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_images.bats b/test/kpod_images.bats
new file mode 100644
index 000000000..0448d61b2
--- /dev/null
+++ b/test/kpod_images.bats
@@ -0,0 +1,47 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="debian:6.0.10"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod images" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} images
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod images test valid json" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+	run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} images --format json | python -m json.tool"
+	echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod images check name json output" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} images --format json
+ echo "$output"
+ [ "$status" -eq 0 ]
+	name=$(echo "$output" | python -c 'import sys, json; print(json.loads(sys.stdin.read())[0]["names"][0])')
+ [ "$name" = "docker.io/library/${IMAGE}" ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_inspect.bats b/test/kpod_inspect.bats
new file mode 100644
index 000000000..ca4b7c8eb
--- /dev/null
+++ b/test/kpod_inspect.bats
@@ -0,0 +1,58 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod inspect image" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect ${IMAGE} | python -m json.tool"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+
+@test "kpod inspect non-existent container" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS inspect 14rcole/non-existent
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
+
+@test "kpod inspect with format" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS inspect --format {{.ID}} ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ inspectOutput="$output"
+ run ${KPOD_BINARY} $KPOD_OPTIONS images --no-trunc --quiet ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" = "$inspectOutput" ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod inspect specified type" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect --type image ${IMAGE} | python -m json.tool"
+	echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_kill.bats b/test/kpod_kill.bats
new file mode 100644
index 000000000..154875145
--- /dev/null
+++ b/test/kpod_kill.bats
@@ -0,0 +1,83 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+function start_sleep_container () {
+ pod_id=$(crioctl pod run --config "$TESTDATA"/sandbox_config.json)
+ ctr_id=$(crioctl ctr create --config "$TESTDATA"/container_config_sleep.json --pod "$pod_id")
+ crioctl ctr start --id "$ctr_id"
+}
+
+@test "kill a bogus container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} kill foobar
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
+
+@test "kill a running container by id" {
+ start_crio
+ ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ ctr_id=$( start_sleep_container )
+ crioctl ctr status --id "$ctr_id"
+ ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a
+ ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id"
+ crioctl ctr status --id "$ctr_id"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} kill "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kill a running container by id with TERM" {
+ start_crio
+ ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ ctr_id=$( start_sleep_container )
+ crioctl ctr status --id "$ctr_id"
+ ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a
+ ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id"
+ crioctl ctr status --id "$ctr_id"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} kill -s TERM "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kill a running container by name" {
+ start_crio
+ ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ ctr_id=$( start_sleep_container )
+ crioctl ctr status --id "$ctr_id"
+ ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a
+ ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id"
+ crioctl ctr status --id "$ctr_id"
+ ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} kill "k8s_container999_podsandbox1_redhat.test.crio_redhat-test-crio_1"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kill a running container by id with a bogus signal" {
+ start_crio
+ ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ ctr_id=$( start_sleep_container )
+ crioctl ctr status --id "$ctr_id"
+ ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id"
+ crioctl ctr status --id "$ctr_id"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} kill -s foobar "$ctr_id"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_load.bats b/test/kpod_load.bats
new file mode 100644
index 000000000..e3896b2ae
--- /dev/null
+++ b/test/kpod_load.bats
@@ -0,0 +1,84 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod load input flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod load oci-archive image" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar --format oci-archive $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod load oci-archive image with signature-policy" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar --format oci-archive $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ [ "$status" -eq 0 ]
+ cp /etc/containers/policy.json /tmp
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} load --signature-policy /tmp/policy.json -i alpine.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f /tmp/policy.json
+ rm -f alpine.tar
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod load using quiet flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} load -q -i alpine.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod load non-existent file" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
diff --git a/test/kpod_logs.bats b/test/kpod_logs.bats
new file mode 100644
index 000000000..1e3015566
--- /dev/null
+++ b/test/kpod_logs.bats
@@ -0,0 +1,74 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "display logs for container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS logs "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "tail three lines of logs for container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS logs --tail 3 $ctr_id
+ echo "$output"
+	lines=$(echo "$output" | wc -l)
+	[ "$status" -eq 0 ]
+	[ "$lines" -le 3 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "display logs for container since a given time" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS logs --since 2017-08-07T10:10:09.056611202-04:00 $ctr_id
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_mount.bats b/test/kpod_mount.bats
new file mode 100644
index 000000000..237dd5848
--- /dev/null
+++ b/test/kpod_mount.bats
@@ -0,0 +1,48 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "mount" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id
+ echo "$output"
+ echo ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --notruncate | grep $ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id
+ echo "$output"
+ [ "$status" -eq 0 ]
+ root="$output"
+ run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --format=json | python -m json.tool | grep $ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ touch $root/foobar
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_pause.bats b/test/kpod_pause.bats
new file mode 100644
index 000000000..84321beb3
--- /dev/null
+++ b/test/kpod_pause.bats
@@ -0,0 +1,169 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "pause a bogus container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause foobar
+ echo "$output"
+ [ "$status" -eq 1 ]
+}
+
+@test "unpause a bogus container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause foobar
+ echo "$output"
+ [ "$status" -eq 1 ]
+}
+
+@test "pause a created container by id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "pause a running container by id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "pause a running container by name" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "remove a paused container by id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ id="$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rm "$id"
+ echo "$output"
+ [ "$status" -eq 1 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rm --force "$id"
+ echo "$output"
+ [ "$status" -eq 1 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rm "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "stop a paused container created by id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 1 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_ps.bats b/test/kpod_ps.bats
new file mode 100644
index 000000000..a4a7b6cbc
--- /dev/null
+++ b/test/kpod_ps.bats
@@ -0,0 +1,313 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+@test "kpod ps with no containers" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod ps default" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps all flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --all
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps size flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -s
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --size
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps quiet flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -q
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps latest flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --latest
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -l
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps last flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --last 2
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -n 2
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps no-trunc flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --no-trunc
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps namespace flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --ns
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --all --namespace
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps namespace flag and format flag = json" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --ns --format json | python -m json.tool | grep namespace"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps without namespace flag and format flag = json" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format json | python -m json.tool | grep namespace"
+ echo "$output"
+ [ "$status" -eq 1 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps format flag = go template" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format "table {{.ID}} {{.Image}} {{.Labels}}"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps filter flag - ancestor" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter ancestor=${IMAGE}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps filter flag - id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "kpod ps filter flag - status" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl image pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter status=running
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_pull.bats b/test/kpod_pull.bats
new file mode 100644
index 000000000..c12c62413
--- /dev/null
+++ b/test/kpod_pull.bats
@@ -0,0 +1,138 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod pull from docker with tag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian:6.0.10
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian:6.0.10
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from docker without tag" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull debian
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from a non-docker registry with tag" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora:rawhide
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora:rawhide
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from a non-docker registry without tag" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull using digest" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull alpine@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi alpine:latest
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from a non existent image" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull umohnani/get-started
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
+
+@test "kpod pull from docker with shortname" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi docker.io/debian:latest
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from docker with shortname and tag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian:6.0.10
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi docker.io/debian:6.0.10
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod pull from docker-archive" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alp.tar alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker-archive:alp.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alp.tar
+}
+
+@test "kpod pull from oci-archive" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save --format oci-archive -o oci-alp.tar alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull oci-archive:oci-alp.tar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f oci-alp.tar
+}
+
+@test "kpod pull from local directory" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run mkdir test_pull_dir
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} push alpine dir:test_pull_dir
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull dir:test_pull_dir
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi test_pull_dir
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -rf test_pull_dir
+}
diff --git a/test/kpod_push.bats b/test/kpod_push.bats
new file mode 100644
index 000000000..e8fe4fdbc
--- /dev/null
+++ b/test/kpod_push.bats
@@ -0,0 +1,90 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod push to containers/storage" {
+ echo # Pull down the image: it gets the name $IMAGE.
+ run ${KPOD_BINARY} $KPOD_OPTIONS --log-level=debug pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ echo # Push the image right back into storage: it now has two names.
+ run ${KPOD_BINARY} $KPOD_OPTIONS --log-level=debug push "$IMAGE" containers-storage:busybox:test
+ echo "$output"
+ [ "$status" -eq 0 ]
+ echo # Try to remove it using the first name. Should be refused.
+ run ${KPOD_BINARY} $KPOD_OPTIONS --log-level=debug rmi "$IMAGE"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ echo # Try to remove it using the second name. Should also be refused.
+ run ${KPOD_BINARY} $KPOD_OPTIONS --log-level=debug rmi busybox:test
+ echo "$output"
+ [ "$status" -ne 0 ]
+ echo # Force removal despite having multiple names. Should succeed.
+ run ${KPOD_BINARY} $KPOD_OPTIONS --log-level=debug rmi -f busybox:test
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod push to directory" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run mkdir /tmp/busybox
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -rf /tmp/busybox
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod push to docker archive" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" docker-archive:/tmp/busybox-archive:1.26
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm /tmp/busybox-archive
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod push to oci-archive without compression" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" oci-archive:/tmp/oci-busybox.tar:alpine
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f /tmp/oci-busybox.tar
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod push without signatures" {
+ run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run mkdir /tmp/busybox
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS push --remove-signatures "$IMAGE" dir:/tmp/busybox
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -rf /tmp/busybox
+ run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_rename.bats b/test/kpod_rename.bats
new file mode 100644
index 000000000..ed3fdada1
--- /dev/null
+++ b/test/kpod_rename.bats
@@ -0,0 +1,33 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod rename successful" {
+ start_crio
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ pod_id="$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ ctr_id="$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rename "$ctr_id" "$NEW_NAME"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS inspect "$ctr_id" --format {{.Name}}
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" == "$NEW_NAME" ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_rm.bats b/test/kpod_rm.bats
new file mode 100644
index 000000000..022e3efc2
--- /dev/null
+++ b/test/kpod_rm.bats
@@ -0,0 +1,90 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "remove a stopped container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "refuse to remove a running container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "remove a created container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "remove a running container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_save.bats b/test/kpod_save.bats
new file mode 100644
index 000000000..d8c581a5c
--- /dev/null
+++ b/test/kpod_save.bats
@@ -0,0 +1,65 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod save output flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+}
+
+@test "kpod save oci flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar --format oci-archive $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+}
+
+@test "kpod save using stdout" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save > alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+}
+
+@test "kpod save quiet flag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -q -o alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ rm -f alpine.tar
+}
+
+@test "kpod save non-existent image" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE
+ echo "$output"
+ [ "$status" -ne 0 ]
+}
diff --git a/test/kpod_stats.bats b/test/kpod_stats.bats
new file mode 100644
index 000000000..a4b8e61e6
--- /dev/null
+++ b/test/kpod_stats.bats
@@ -0,0 +1,104 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "stats single output" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "stats does not output stopped container" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "stats outputs stopped container with all flag" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream --all
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "stats output only id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream --format {{.ID}} "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ # once ps is implemented, run ps -q and see if that equals the output from above
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "stats streaming output" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run timeout 5s bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} stats --all"
+ echo "$output"
+	[ "$status" -eq 124 ] # 124 is the exit status timeout(1) sets when it has to kill the command after the time limit expires
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_stop.bats b/test/kpod_stop.bats
new file mode 100644
index 000000000..72e818d40
--- /dev/null
+++ b/test/kpod_stop.bats
@@ -0,0 +1,58 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "stop a bogus container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stop foobar
+ echo "$output"
+ [ "$status" -eq 1 ]
+}
+
+@test "stop a running container by id" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ id="$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "stop a running container by name" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ [ "$status" -eq 0 ]
+ run crioctl ctr inspect --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
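+	# pull the cri-o-assigned container name out of the inspect annotations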
+ ctr_name=$(python -c 'import json; import sys; print json.load(sys.stdin)["crio_annotations"]["io.kubernetes.cri-o.Name"]' <<< "$output")
+ echo container name is \""$ctr_name"\"
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_name"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/kpod_tag.bats b/test/kpod_tag.bats
new file mode 100644
index 000000000..93109db52
--- /dev/null
+++ b/test/kpod_tag.bats
@@ -0,0 +1,50 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="alpine:latest"
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod tag with shortname:latest" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar:latest
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:latest
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:latest
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod tag with shortname" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:latest
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:latest
+ [ "$status" -eq 0 ]
+}
+
+@test "kpod tag with shortname:tag" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar:v
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:v
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:v
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_version.bats b/test/kpod_version.bats
new file mode 100644
index 000000000..e6c062b80
--- /dev/null
+++ b/test/kpod_version.bats
@@ -0,0 +1,13 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "kpod version test" {
+ run ${KPOD_BINARY} version
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
diff --git a/test/kpod_wait.bats b/test/kpod_wait.bats
new file mode 100644
index 000000000..ba7556b2e
--- /dev/null
+++ b/test/kpod_wait.bats
@@ -0,0 +1,69 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+# Returns the POD ID
+function pod_run_from_template(){
+	# $1=name, $2=uid, $3=namespace
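+	# envsubst fills the $NAME, $CUID and $NAMESPACE placeholders in the
+	# sandbox template and writes a concrete config for crioctl to consume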
+ NAME=$1 CUID=$2 NAMESPACE=$3 envsubst < ${TESTDATA}/template_sandbox_config.json > ${TESTDIR}/pod-${1}.json
+ crioctl pod run --config ${TESTDIR}/pod-${1}.json
+}
+
+# Returns the container ID
+function container_create_from_template() {
+	# $1=name, $2=image, $3=command, $4=pod id
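+	# same templating approach: $NAME, $IMAGE and $COMMAND are substituted
+	# into the container template before it is handed to crioctl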
+ NAME=$1 IMAGE=$2 COMMAND=$3 envsubst < ${TESTDATA}/template_container_config.json > ${TESTDIR}/ctr-${1}.json
+ crioctl ctr create --config ${TESTDIR}/ctr-${1}.json --pod "$4"
+}
+
+function container_start() {
+	# $1=id
+	crioctl ctr start --id "$1"
+}
+
+@test "wait on a bogus container" {
+ start_crio
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} wait 12343
+ echo $output
+ [ "$status" -eq 1 ]
+ stop_crio
+}
+
+@test "wait on a stopped container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ echo $output
+ [ "$status" -eq 0 ]
+ start_crio
+ pod_id=$( pod_run_from_template "test" "test" "test1-1" )
+ echo $pod_id
+ ctr_id=$(container_create_from_template "test-CTR" "docker.io/library/busybox:latest" '["ls"]' "${pod_id}")
+ echo $ctr_id
+ container_start $ctr_id
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} wait $ctr_id
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "wait on a sleeping container" {
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
+ echo $output
+ [ "$status" -eq 0 ]
+ start_crio
+ pod_id=$( pod_run_from_template "test" "test" "test1-1" )
+ echo $pod_id
+ ctr_id=$(container_create_from_template "test-CTR" "docker.io/library/busybox:latest" '["sleep", "5"]' "${pod_id}")
+ echo $ctr_id
+ run container_start $ctr_id
+ echo $output
+ [ "$status" -eq 0 ]
+ run ${KPOD_BINARY} ${KPOD_OPTIONS} wait $ctr_id
+ echo $output
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/network.bats b/test/network.bats
new file mode 100644
index 000000000..dc8143c26
--- /dev/null
+++ b/test/network.bats
@@ -0,0 +1,186 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+ rm -f /var/lib/cni/networks/crionet_test_args/*
+ chmod 0755 $CONMON_BINARY
+ cleanup_test
+}
+
+@test "ensure correct hostname" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl ctr execsync --id "$ctr_id" sh -c "hostname"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "crioctl_host" ]]
+ run crioctl ctr execsync --id "$ctr_id" sh -c "echo \$HOSTNAME"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "crioctl_host" ]]
+ run crioctl ctr execsync --id "$ctr_id" sh -c "cat /etc/hostname"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "crioctl_host" ]]
+}
+
+@test "ensure correct hostname for hostnetwork:true" {
+ start_crio
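+	# rewrite the sandbox config to enable host networking and clear the
+	# custom hostname and annotations so the container inherits the host's hostname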
+ hostnetworkconfig=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["namespace_options"]["host_network"] = True; obj["annotations"] = {}; obj["hostname"] = ""; json.dump(obj, sys.stdout)')
+ echo "$hostnetworkconfig" > "$TESTDIR"/sandbox_hostnetwork_config.json
+ run crioctl pod run --config "$TESTDIR"/sandbox_hostnetwork_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl ctr execsync --id "$ctr_id" sh -c "hostname"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "$HOSTNAME" ]]
+ run crioctl ctr execsync --id "$ctr_id" sh -c "echo \$HOSTNAME"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "$HOSTNAME" ]]
+ run crioctl ctr execsync --id "$ctr_id" sh -c "cat /etc/hostname"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "$HOSTNAME" ]]
+}
+
+@test "Check for valid pod netns CIDR" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ check_pod_cidr $ctr_id
+}
+
+@test "Ping pod from the host" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ ping_pod $ctr_id
+}
+
+@test "Ping pod from another pod" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod1_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr1_id="$output"
+
+ temp_sandbox_conf cni_test
+
+ run crioctl pod run --config "$TESTDIR"/sandbox_config_cni_test.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod2_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr2_id="$output"
+
+ ping_pod_from_pod $ctr1_id $ctr2_id
+
+ ping_pod_from_pod $ctr2_id $ctr1_id
+}
+
+@test "Ensure correct CNI plugin namespace/name/container-id arguments" {
+ start_crio "" "" "" "prepare_plugin_test_args_network_conf"
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ [ "$status" -eq 0 ]
+
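+	# the test CNI plugin dumps the arguments it was invoked with into this
+	# file; the container ID must not be mistaken for the pod name or
+	# namespace, and the K8S_POD_* arguments must match the sandbox config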
+ . /tmp/plugin_test_args.out
+
+ [ "$FOUND_CNI_CONTAINERID" != "redhat.test.crio" ]
+ [ "$FOUND_CNI_CONTAINERID" != "podsandbox1" ]
+ [ "$FOUND_K8S_POD_NAMESPACE" = "redhat.test.crio" ]
+ [ "$FOUND_K8S_POD_NAME" = "podsandbox1" ]
+
+ rm -rf /tmp/plugin_test_args.out
+}
+
+@test "Connect to pod hostport from the host" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config_hostport.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ get_host_ip
+ echo $host_ip
+
+ run crioctl ctr create --config "$TESTDATA"/container_config_hostport.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
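+	# the hostport container is expected to answer on host port 4888 with
+	# the sandbox hostname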
+ run nc -w 5 $host_ip 4888 </dev/null
+ echo "$output"
+ [ "$output" = "crioctl_host" ]
+ [ "$status" -eq 0 ]
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+}
+
+@test "Clean up network if pod sandbox fails" {
+ start_crio "" "" "" "prepare_plugin_test_args_network_conf"
+
+ # make conmon non-executable to cause the sandbox setup to fail after
+ # networking has been configured
+ chmod 0644 $CONMON_BINARY
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ chmod 0755 $CONMON_BINARY
+ echo "$output"
+ [ "$status" -ne 0 ]
+
+ # ensure that the server cleaned up sandbox networking if the sandbox
+ # failed after network setup
+ rm -f /var/lib/cni/networks/crionet_test_args/last_reserved_ip
+ num_allocated=$(ls /var/lib/cni/networks/crionet_test_args | wc -l)
+ [[ "${num_allocated}" == "0" ]]
+}
diff --git a/test/pod.bats b/test/pod.bats
new file mode 100644
index 000000000..2b5837903
--- /dev/null
+++ b/test/pod.bats
@@ -0,0 +1,365 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+# PR#59
+@test "pod release name on remove" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ id="$output"
+ run crioctl pod stop --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ id="$output"
+ run crioctl pod stop --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod remove" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod stop ignores not found sandboxes" {
+ start_crio
+
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod list filtering" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod1 --label "a=b" --label "c=d" --label "e=f"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod1_id="$output"
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod2 --label "a=b" --label "c=d"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod2_id="$output"
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod3 --label "a=b"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod3_id="$output"
+ run crioctl pod list --label "a=b" --label "c=d" --label "e=f" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ run crioctl pod list --label "g=h" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == "" ]]
+ run crioctl pod list --label "a=b" --label "c=d" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ [[ "$output" =~ "$pod2_id" ]]
+ run crioctl pod list --label "a=b" --quiet
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ [[ "$output" =~ "$pod2_id" ]]
+ [[ "$output" =~ "$pod3_id" ]]
+ run crioctl pod list --id "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ # filter by truncated id should work as well
+ run crioctl pod list --id "${pod1_id:0:4}"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ run crioctl pod list --id "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod2_id" ]]
+ run crioctl pod list --id "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod3_id" ]]
+ run crioctl pod list --id "$pod1_id" --label "a=b"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod1_id" ]]
+ run crioctl pod list --id "$pod2_id" --label "a=b"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod2_id" ]]
+ run crioctl pod list --id "$pod3_id" --label "a=b"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" != "" ]]
+ [[ "$output" =~ "$pod3_id" ]]
+ run crioctl pod list --id "$pod3_id" --label "c=d"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" == "" ]]
+ run crioctl pod stop --id "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod1_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod2_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod3_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod metadata in list & status" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl pod list --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+	# TODO: the expected values should not be hard-coded here
+ [[ "$output" =~ "Name: podsandbox1" ]]
+ [[ "$output" =~ "UID: redhat-test-crio" ]]
+ [[ "$output" =~ "Namespace: redhat.test.crio" ]]
+ [[ "$output" =~ "Attempt: 1" ]]
+
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+	# TODO: the expected values should not be hard-coded here
+ [[ "$output" =~ "Name: podsandbox1" ]]
+ [[ "$output" =~ "UID: redhat-test-crio" ]]
+ [[ "$output" =~ "Namespace: redhat.test.crio" ]]
+ [[ "$output" =~ "Attempt: 1" ]]
+
+ cleanup_pods
+ stop_crio
+}
+
+@test "pass pod sysctls to runtime" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl ctr create --pod "$pod_id" --config "$TESTDATA"/container_redis.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ container_id="$output"
+
+ run crioctl ctr start --id "$container_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
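+	# the expected values below are the sysctls requested in sandbox_config.json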
+ run crioctl ctr execsync --id "$container_id" sysctl kernel.shm_rmid_forced
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "kernel.shm_rmid_forced = 1" ]]
+
+ run crioctl ctr execsync --id "$container_id" sysctl kernel.msgmax
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "kernel.msgmax = 8192" ]]
+
+ run crioctl ctr execsync --id "$container_id" sysctl net.ipv4.ip_local_port_range
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "net.ipv4.ip_local_port_range = 1024 65000" ]]
+
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod stop idempotent" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod remove idempotent" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "pod stop idempotent with ctrs already stopped" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "restart crio and still get pod status" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ restart_crio
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "invalid systemd cgroup_parent fail" {
+ if [[ "$CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
+ wrong_cgroup_parent_config=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["cgroup_parent"] = "podsandbox1.slice:container:infra"; json.dump(obj, sys.stdout)')
+ echo "$wrong_cgroup_parent_config" > "$TESTDIR"/sandbox_wrong_cgroup_parent.json
+
+ start_crio
+ run crioctl pod run --config "$TESTDIR"/sandbox_wrong_cgroup_parent.json
+ echo "$output"
+ [ "$status" -eq 1 ]
+
+ stop_crio
+}
+
+@test "systemd cgroup_parent correctly set" {
+ if [[ "$CGROUP_MANAGER" != "systemd" ]]; then
+ skip "need systemd cgroup manager"
+ fi
+
+ cgroup_parent_config=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["cgroup_parent"] = "/Burstable/pod_integration_tests-123"; json.dump(obj, sys.stdout)')
+ echo "$cgroup_parent_config" > "$TESTDIR"/sandbox_systemd_cgroup_parent.json
+
+ start_crio
+ run crioctl pod run --config "$TESTDIR"/sandbox_systemd_cgroup_parent.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
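+	# systemd translates the cgroup parent path into a slice name: "/"
+	# separators become "-" and "-" becomes "_"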
+ run systemctl list-units --type=slice
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Burstable-pod_integration_tests_123.slice" ]]
+
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/policy.json b/test/policy.json
new file mode 100644
index 000000000..ddc46e1a6
--- /dev/null
+++ b/test/policy.json
@@ -0,0 +1,23 @@
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ],
+ "transports": {
+ "docker": {
+ "docker.io/library/hello-world": [
+ {
+ "type": "reject"
+ }
+ ],
+ "registry.access.redhat.com": [
+ {
+ "keyType": "GPGKeys",
+ "type": "signedBy",
+ "keyData": "VGhlIGZvbGxvd2luZyBwdWJsaWMga2V5IGNhbiBiZSB1c2VkIHRvIHZlcmlmeSBSUE0gcGFja2FnZXMgYnVpbHQgYW5kCnNpZ25lZCBieSBSZWQgSGF0LCBJbmMuICBUaGlzIGtleSBpcyB1c2VkIGZvciBwYWNrYWdlcyBpbiBSZWQgSGF0CnByb2R1Y3RzIHNoaXBwZWQgYWZ0ZXIgTm92ZW1iZXIgMjAwOSwgYW5kIGZvciBhbGwgdXBkYXRlcyB0byB0aG9zZQpwcm9kdWN0cy4KClF1ZXN0aW9ucyBhYm91dCB0aGlzIGtleSBzaG91bGQgYmUgc2VudCB0byBzZWN1cml0eUByZWRoYXQuY29tLgoKcHViICA0MDk2Ui9GRDQzMUQ1MSAyMDA5LTEwLTIyIFJlZCBIYXQsIEluYy4gKHJlbGVhc2Uga2V5IDIpIDxzZWN1cml0eUByZWRoYXQuY29tPgoKLS0tLS1CRUdJTiBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tClZlcnNpb246IEdudVBHIHYxLjIuNiAoR05VL0xpbnV4KQoKbVFJTkJFcmdTVHNCRUFDaDJBNGIwTzl0K3Z6QzlWclZ0TDFBS3ZVV2k5T1BDamt2UjdYZDhEdEp4ZWVNWjVlRgowSHR6SUc1OHFEUnlid1VlODlGWnByQjFmZnVVS3pkRStIY0wzRmJOV1NTT1hWalpJZXJzZFh5SDNOdm5MTExGCjBETlJCMml4M2JYRzlSaC9SWHBGc054RHAyQ0VNZFV2YllDekU3OUsxRW5VVFZoMUwwT2YwMjNGdFBTWlhYMGMKdTdQYjVESTVsWDVZZW9YTzZSb29kcklHWUpzVkJRV25yV3c0eE5UY29uVWZOUGswRUdadEVuenZIMnp5UG9KaApYR0YrTmN1OVh3YmFsbllkZTEwT0N2U1dBWjV6VENwb0xNVHZRaldwYkNkV1hKekNtNkcrL2h4OXVwa2U1NDZICjVJanRZbTRkVElWVG5jM3d2RGlPRGdCS1J6T2w5ckVPQ0lnT3VHdER4UnhjUWtqckMreHZnNVZrcW43dkJVeVcKOXBIZWRPVStQb0YzREdPTStkcXYrZU5LQnZoOVlGOXVnRkFRQmtjRzd2aVpndkdFTUdHVXB6TmdON1huUzFnagovRFBvOW1aRVNPWW5LY2V2ZTJ0SUM4N3AyaHFqcnhPSHVJN2ZrWlllTkljQW9hODNyQmx0RlhhQkRZaFdBS1MxClBjWFMxLzdKelAwa3k3ZDBMNlhidS9JZjVrcVdRcEt3VUluWHR5U1JrdXJhVmZ1SzNCcGErWDFYZWNXaTI0SlkKSFZ0bE5YMDI1eHgxZXdWekdOQ1RsV24xc2tRTjJPT29RVFY0QzgvcUZwVFc2RFRXWXVyZDQrZkUwT0pGSlpRRgpidWhmWFl3bVJsVk9nTjVpNzdOVElKWkpRZllGajM4Yy9JdjV2WkJQb2tPNm1mZnJPVHYzTUhXVmdRQVJBUUFCCnRETlNaV1FnU0dGMExDQkpibU11SUNoeVpXeGxZWE5sSUd0bGVTQXlLU0E4YzJWamRYSnBkSGxBY21Wa2FHRjAKTG1OdmJUNkpBallFRXdFQ0FDQUZBa3JnU1RzQ0d3TUdDd2tJQndNQ0JCVUNDQU1FRmdJREFRSWVBUUlYZ0FBSwpDUkFabmkrUi9VTWRVV3pwRC85czVTRlIvWkYzeWpZNVZMVUZMTVhJS1V6dE5OM29jNDVmeUxkVEkzK1VDbEtDCjJ0RXJ1ellqcU5IaHFBRVhhMnNOMWZNcnN1S2VjNjFMbDJOZnZKamtMS0R2Z1ZJaDdrTTdhc2xOWVZPUDZCVGYKQy9KSjcvdWZ6M1VabXlWaUgvV0RsK0FZZGdrM0pxQ0lPNXc1cnlyQzlJeUJ6WXYybTBIcVliV2ZwaFkzdUh3NQp1bjNuZExKY3U4K0JHUDVGK09OUUVHbCtEUkg1OElsOUpwM0h3YlJhN2R2a1BnRWhmRlIrMWhJK0J0dGEyQzdFCjAvMk5LekN4Wnc3THgzUEJSY1U5MllLeWFFaWhmeS9hUUtaQ0F1eWZLaU12c216cys0cG9JWDdJOU5RQ0pweUUKSUdmSU5vWjdWeHFId1JuL2Q1bXcyTVpUSmpielNmK1VtOVlKeUEwaUVFeUQ2cWpyaVdRUmJ1eHBRWG1sQUpiaAo4b2taNGdiVkZ2MUY4TXpLKzRSOFZ2V0owWHhndGlrU283MmZIandoYTdNQWpxRm5PcTZlbzZmRUMvNzVnM05MCkdodDVWZHBHdUhrMHZiZEVOSE1DOHdTOTllNXFYR05EdWVkM2hsVGF2RE1sRUFIbDM0cTJIOW5ha1RHUkY1S2kKSlVmTmgzRFZSR2hnOGNNSXRpMjFuamlSaDdneUZJMk9jY0FUWTdiQlNyNzlKaHVOd2VsSHV4THJDRnBZN1YyNQpPRmt0bDE1alpKYU14dVFCcVlkQmdTYXkyRzBVNkQxKzdWc1d1ZnB6ZC9BYngxL2Mzb2k5WmFKdlcyMmtBZ2dxCmR6ZEEyN1VVWWpXdng0Mnc5bWVuSndoLzBqZVFjVGVjSVVkMGQwckZjdy9jMXB2Z01NbC9RNzN5ektnS1l3PT0KPXpiSEUKLS0tLS1FTkQgUEdQIFBVQkxJQyBLRVkgQkxPQ0stLS0tLQpUaGUgZm9sbG93aW5nIHB1YmxpYyBrZXkgY2FuIGJlIHVzZWQgdG8gdmVyaWZ5IFJQTSBwYWNrYWdlcyBidWlsdCBhbmQKc2lnbmVkIGJ5IFJlZCBIYXQsIEluYy4gIFRoaXMga2V5IGlzIGEgc3VwcG9ydGluZyAoYXV4aWxpYXJ5KSBrZXkgZm9yClJlZCBIYXQgcHJvZHVjdHMgc2hpcHBlZCBhZnRlciBOb3ZlbWJlciAyMDA2IGFuZCBmb3IgYWxsIHVwZGF0ZXMgdG8KdGhvc2UgcHJvZHVjdHMuCgpRdWVzdGlvbnMgYWJvdXQgdGhpcyBrZXkgc2hvdWxkIGJlIHNlbnQgdG8gc2VjdXJpdHlAcmVkaGF0LmNvbS4KCi0tLS0tQkVHSU4gUEdQIFBVQkxJQyBLRVkgQkxPQ0stLS0tLQpWZXJzaW9uOiBHbnVQRyB2MS4yLjYgKEdOVS9MaW51eCkKCm1RR2lCRVZ3REdrUkJBQ3dQaFpJcHZrakk4d1Y5c0ZURG9xeVBMeDF1YjhTZC93K1l1STVPdm00OW12dkVRVlQKVkxnOEZnRTVKbFNUNTlBYnNMRHlWdFJhOUN4SXZONXN5QlZyV1dXdEh0RG5ueWxGQmNxRy9BNkozYkk0RTkvQQpVdFNMNVp4YmF2MCt1dFA2ZjN3T3B4UXJ4YytXSURWZ3B1cmRCS0FRM2Rzb2JHQnF5cGVYNkZYWjV3Q2dvdTZDCnlacEdJQnFvc0phRFdMek5lT2ZiLzcwRC8xdGhMa1F5aFczSko2Y0hDWUpITmZCU2h2YkxXQmY2UzIzMW1nb
XUKTXlNbHQ4S21pcGM5Yncrc2FhQWtTa1ZzUS9aYmZqcldCN2U1a2JNcnVLTFZySCtuR2hhbWxIWVVHeUFQdHNQZwpVai9OVVNqNUJtckNzT2tNcG40M25nVExzc0U5TUxoU1BqMm5JSEdGdjlCK2lWTHZvbURkd25hQlJnUTFhSzh6Cno2TUFBLzQwNnlmNXlWSi9NbFRXczEvNjhWd0Rob3NjOUJ0VTFWNUlFME5YZ1pVQWZCSnp6ZlZ6ektRcTZ6SjIKZVpzTUxocjk2d2JzVzEzelVadDFpbmcrdWx3aDJlZTRtZXVKcTZoLzk3MUpzcEZZL1hCaGNmcTRxQ05xVmpzcQpTWm5Xb0dkQ082SjhDeFBJZW1EMklVSHpqb3l5ZUVqM1JWeWR1cDZwY1daQW1oemtLclF6VW1Wa0lFaGhkQ3dnClNXNWpMaUFvWVhWNGFXeHBZWEo1SUd0bGVTa2dQSE5sWTNWeWFYUjVRSEpsWkdoaGRDNWpiMjAraUY0RUV4RUMKQUI0RkFrVndER2tDR3dNR0N3a0lCd01DQXhVQ0F3TVdBZ0VDSGdFQ0Y0QUFDZ2tRUldpY2lDK21XT0MxclFDZwpvb05MQ0ZPek5QY3ZoZDlaYThDODAxSG1uc1lBbmlDdzN5enJDcXRqWW54RER4bHVmSDBGVlR3WAo9ZC9ibQotLS0tLUVORCBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tCg=="
+ }
+ ]
+ }
+ }
+}
diff --git a/test/redhat_sigstore.yaml b/test/redhat_sigstore.yaml
new file mode 100644
index 000000000..835528538
--- /dev/null
+++ b/test/redhat_sigstore.yaml
@@ -0,0 +1,3 @@
+docker:
+ registry.access.redhat.com:
+ sigstore: https://access.redhat.com/webassets/docker/content/sigstore
diff --git a/test/registries.conf b/test/registries.conf
new file mode 100644
index 000000000..f3bf092b0
--- /dev/null
+++ b/test/registries.conf
@@ -0,0 +1,9 @@
+[registries.search]
+registries = ['registry.access.redhat.com', 'registry.fedoraproject.org', 'docker.io']
+
+[registries.insecure]
+registries = []
+
+#blocked (docker only)
+[registries.block]
+registries = []
diff --git a/test/restore.bats b/test/restore.bats
new file mode 100644
index 000000000..264096ed8
--- /dev/null
+++ b/test/restore.bats
@@ -0,0 +1,267 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "crio restore" {
+ start_crio
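+	# record pod and container list/status output, restart crio, and verify
+	# the restored state matches what was recorded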
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl pod list --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_list_info="$output"
+
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_status_info="$output"
+
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr list --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_list_info="$output"
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_status_info="$output"
+
+ stop_crio
+
+ start_crio
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" != "" ]]
+ [[ "${output}" =~ "${pod_id}" ]]
+
+ run crioctl pod list --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" == "${pod_list_info}" ]]
+
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" == "${pod_status_info}" ]]
+
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" != "" ]]
+ [[ "${output}" =~ "${ctr_id}" ]]
+
+ run crioctl ctr list --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" == "${ctr_list_info}" ]]
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" == "${ctr_status_info}" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "crio restore with bad state and pod stopped" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ stop_crio
+
+ # simulate reboot with runc state going away
+ for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
+
+ start_crio
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_pods
+ stop_crio
+}
+
+@test "crio restore with bad state and ctr stopped" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ stop_crio
+
+ # simulate reboot with runc state going away
+ for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
+
+ start_crio
+
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "crio restore with bad state and ctr removed" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl ctr remove --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ stop_crio
+
+ # simulate reboot with runc state going away
+ for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
+
+ start_crio
+
+ run crioctl ctr stop --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 1 ]
+ [[ "${output}" =~ "not found" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+@test "crio restore with bad state and pod removed" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ stop_crio
+
+ # simulate reboot with runc state going away
+ for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
+
+ start_crio
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_pods
+ stop_crio
+}
+
+@test "crio restore with bad state" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" =~ "SANDBOX_READY" ]]
+
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" =~ "CONTAINER_CREATED" ]]
+
+ stop_crio
+
+ # simulate reboot with runc state going away
+ for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
+
+ start_crio
+ run crioctl pod list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" != "" ]]
+ [[ "${output}" =~ "${pod_id}" ]]
+
+ run crioctl pod status --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" =~ "SANDBOX_NOTREADY" ]]
+
+ run crioctl ctr list
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" != "" ]]
+ [[ "${output}" =~ "${ctr_id}" ]]
+
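+	# a container whose runtime state disappeared across the restart is
+	# reported as exited with code 255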
+ run crioctl ctr status --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "${output}" =~ "CONTAINER_EXITED" ]]
+ [[ "${output}" =~ "Exit Code: 255" ]]
+
+ run crioctl pod stop --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl pod remove --id "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/runtimeversion.bats b/test/runtimeversion.bats
new file mode 100644
index 000000000..f0d2a4367
--- /dev/null
+++ b/test/runtimeversion.bats
@@ -0,0 +1,15 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "crioctl runtimeversion" {
+ start_crio
+ run crioctl runtimeversion
+ echo "$output"
+ [ "$status" -eq 0 ]
+ stop_crio
+}
diff --git a/test/seccomp.bats b/test/seccomp.bats
new file mode 100644
index 000000000..b77a7f8cf
--- /dev/null
+++ b/test/seccomp.bats
@@ -0,0 +1,368 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+# 1. test running with ctr unconfined
+# test that we can run with a syscall which would be otherwise blocked
+@test "ctr seccomp profiles unconfined" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
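+	# build a test profile from the default seccomp.json with chmod, fchmod
+	# and fchmodat removed, so chmod fails whenever the profile is enforced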
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json
+ run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 2. test running with ctr runtime/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "ctr seccomp profiles runtime/default" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname2": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
+ run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ [[ "$output" =~ "Operation not permitted" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 3. test running with ctr wrong profile name
+@test "ctr seccomp profiles wrong profile name" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname3": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json
+ run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname3 --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "unknown seccomp profile option:" ]]
+ [[ "$output" =~ "notgood" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# TODO(runcom): need https://issues.k8s.io/36997
+# 4. test running with ctr localhost/profile_name
+@test "ctr seccomp profiles localhost/profile_name" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ #start_crio "$TESTDIR"/seccomp_profile1.json
+
+ skip "need https://issues.k8s.io/36997"
+}
+
+# 5. test running with unknown ctr profile falls back to pod profile
+# unknown ctr -> unconfined
+# pod -> runtime/default
+# result: fail chmod
+@test "ctr seccomp profiles falls back to pod profile" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/redhat\.test\.crio-seccomp2-1-testname2-0-not-exists": "unconfined", "seccomp\.security\.alpha\.kubernetes\.io\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp5.json
+ run crioctl pod run --name seccomp5 --config "$TESTDIR"/seccomp5.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ [[ "$output" =~ "Operation not permitted" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 6. test running with unknown ctr profile and no pod profile, falls back to unconfined
+# unknown ctr -> runtime/default
+# pod -> NO
+# result: success, running unconfined
+@test "ctr seccomp profiles falls back to unconfined" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/redhat\.test\.crio-seccomp6-1-testname6-0-not-exists": "runtime-default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp6.json
+ run crioctl pod run --name seccomp6 --config "$TESTDIR"/seccomp6.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname6 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 1. test running with pod unconfined
+# test that we can run with a syscall which would be otherwise blocked
+@test "pod seccomp profiles unconfined" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json
+ run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 2. test running with pod runtime/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "pod seccomp profiles runtime/default" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
+ run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ [[ "$output" =~ "Operation not permitted" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# 3. test running with pod wrong profile name
+@test "pod seccomp profiles wrong profile name" {
+ # this test requires seccomp, so skip this test if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ # 3. test running with pod wrong profile name
+ sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json
+ run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -ne 0 ]
+ [[ "$output" =~ "unknown seccomp profile option:" ]]
+ [[ "$output" =~ "notgood" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
+
+# TODO(runcom): need https://issues.k8s.io/36997
+# 4. test running with pod localhost/profile_name
+@test "pod seccomp profiles localhost/profile_name" {
+ # this test requires seccomp, so skip it if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ #start_crio "$TESTDIR"/seccomp_profile1.json
+
+ skip "need https://issues.k8s.io/36997"
+}
+
+# test running with ctr docker/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "ctr seccomp profiles docker/default" {
+ # this test requires seccomp, so skip it if seccomp is not enabled.
+ enabled=$(is_seccomp_enabled)
+ if [[ "$enabled" -eq 0 ]]; then
+ skip "skip this test since seccomp is not enabled."
+ fi
+
+ sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+ sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+ start_crio "$TESTDIR"/seccomp_profile1.json
+
+ sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname2": "docker\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
+ run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ run crioctl ctr execsync --id "$ctr_id" chmod 777 .
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [[ "$output" =~ "Exit code: 1" ]]
+ [[ "$output" =~ "Operation not permitted" ]]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/selinux.bats b/test/selinux.bats
new file mode 100644
index 000000000..1617e5546
--- /dev/null
+++ b/test/selinux.bats
@@ -0,0 +1,26 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function teardown() {
+ cleanup_test
+}
+
+@test "ctr termination reason Completed" {
+ start_crio
+ run crioctl pod run --config "$TESTDATA"/sandbox_config_selinux.json
+ echo "$output"
+ [ "$status" -eq 0 ]
+ pod_id="$output"
+ run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+ ctr_id="$output"
+ run crioctl ctr start --id "$ctr_id"
+ echo "$output"
+ [ "$status" -eq 0 ]
+
+ cleanup_ctrs
+ cleanup_pods
+ stop_crio
+}
diff --git a/test/test_runner.sh b/test/test_runner.sh
new file mode 100755
index 000000000..868df60e3
--- /dev/null
+++ b/test/test_runner.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+# Load the helpers.
+. helpers.bash
+
+function execute() {
+ >&2 echo "++ $@"
+ eval "$@"
+}
+
+# Tests to run. Defaults to all.
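+# For example, `./test_runner.sh seccomp.bats selinux.bats` runs only those two
+# suites; with no arguments, bats runs every *.bats file in this directory.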
+TESTS=${@:-.}
+
+# Run the tests.
+execute time bats --tap $TESTS
diff --git a/test/testdata/README.md b/test/testdata/README.md
new file mode 100644
index 000000000..afc6b32f0
--- /dev/null
+++ b/test/testdata/README.md
@@ -0,0 +1,15 @@
+The config files in this directory can be used to exercise crio and crioctl by hand.
+
+In terminal 1, start the daemon:
+```
+sudo ./crio
+```
+
+In terminal 2, drive it with crioctl:
+```
+sudo ./crioctl runtimeversion
+
+sudo rm -rf /var/lib/containers/storage/sandboxes/podsandbox1
+sudo ./crioctl pod run --config testdata/sandbox_config.json
+
+sudo rm -rf /var/lib/containers/storage/containers/container1
+sudo ./crioctl container create --pod podsandbox1 --config testdata/container_config.json
+```
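+
+To start the container created above (a minimal sketch, assuming the container ID printed by the create step and the same `crioctl ctr` commands that the bats suites in this directory use):
+```
+sudo ./crioctl ctr start --id <container_id>
+```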
diff --git a/test/testdata/apparmor_test_deny_write b/test/testdata/apparmor_test_deny_write
new file mode 100644
index 000000000..55311aaf5
--- /dev/null
+++ b/test/testdata/apparmor_test_deny_write
@@ -0,0 +1,10 @@
+#include <tunables/global>
+
+profile apparmor-test-deny-write flags=(attach_disconnected) {
+ #include <abstractions/base>
+
+ file,
+
+ # Deny all file writes.
+ deny /** w,
+}
diff --git a/test/testdata/container_config.json b/test/testdata/container_config.json
new file mode 100644
index 000000000..d8ef76a56
--- /dev/null
+++ b/test/testdata/container_config.json
@@ -0,0 +1,70 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "command": [
+ "/bin/ls"
+ ],
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "readonly_rootfs": false,
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ },
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_by_imageid.json b/test/testdata/container_config_by_imageid.json
new file mode 100644
index 000000000..d953efb3c
--- /dev/null
+++ b/test/testdata/container_config_by_imageid.json
@@ -0,0 +1,70 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "%VALUE%"
+ },
+ "command": [
+ "/bin/ls"
+ ],
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "readonly_rootfs": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_hostport.json b/test/testdata/container_config_hostport.json
new file mode 100644
index 000000000..e5a0ca67a
--- /dev/null
+++ b/test/testdata/container_config_hostport.json
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "busybox:latest"
+ },
+ "command": [
+ "/bin/nc", "-ll", "-p", "80", "-e"
+ ],
+ "args": [
+ "/bin/hostname"
+ ],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "readonly_rootfs": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_logging.json b/test/testdata/container_config_logging.json
new file mode 100644
index 000000000..8e8d0da44
--- /dev/null
+++ b/test/testdata/container_config_logging.json
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "busybox:latest"
+ },
+ "command": [
+ "/bin/sh", "-c"
+ ],
+ "args": [
+ "%shellcommand%"
+ ],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "readonly_rootfs": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_resolvconf.json b/test/testdata/container_config_resolvconf.json
new file mode 100644
index 000000000..52b77e082
--- /dev/null
+++ b/test/testdata/container_config_resolvconf.json
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "command": [
+ "sh",
+ "-c",
+ "echo test >> /etc/resolv.conf"
+ ],
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "readonly_rootfs": false,
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_resolvconf_ro.json b/test/testdata/container_config_resolvconf_ro.json
new file mode 100644
index 000000000..7e121c079
--- /dev/null
+++ b/test/testdata/container_config_resolvconf_ro.json
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "command": [
+ "sh",
+ "-c",
+ "echo test >> /etc/resolv.conf"
+ ],
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "readonly_rootfs": true,
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_seccomp.json b/test/testdata/container_config_seccomp.json
new file mode 100644
index 000000000..582132b0e
--- /dev/null
+++ b/test/testdata/container_config_seccomp.json
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+ "name": "container1",
+ "attempt": 1
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "command": [
+ "/bin/bash"
+ ],
+ "args": [
+ "/bin/chmod", "777", "."
+ ],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "readonly_rootfs": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_config_sleep.json b/test/testdata/container_config_sleep.json
new file mode 100644
index 000000000..c86ff7011
--- /dev/null
+++ b/test/testdata/container_config_sleep.json
@@ -0,0 +1,71 @@
+{
+ "metadata": {
+ "name": "container999",
+ "attempt": 1
+ },
+ "image": {
+ "image": "docker.io/library/busybox:latest"
+ },
+ "command": [
+ "sleep",
+ "9999"
+ ],
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "readonly_rootfs": false,
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ },
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_exit_test.json b/test/testdata/container_exit_test.json
new file mode 100644
index 000000000..6ead905a6
--- /dev/null
+++ b/test/testdata/container_exit_test.json
@@ -0,0 +1,22 @@
+{
+ "metadata": {
+ "name": "podsandbox1-exit-test"
+ },
+ "image": {
+ "image": "docker://mrunalp/exit_test:latest"
+ },
+ "args": [
+ "/exit_test"
+ ],
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ }
+ ],
+ "readonly_rootfs": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false
+}
diff --git a/test/testdata/container_redis.json b/test/testdata/container_redis.json
new file mode 100644
index 000000000..638aba4fc
--- /dev/null
+++ b/test/testdata/container_redis.json
@@ -0,0 +1,61 @@
+{
+ "metadata": {
+ "name": "podsandbox1-redis"
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "args": [
+ "docker-entrypoint.sh",
+ "redis-server"
+ ],
+ "working_dir": "/data",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "REDIS_VERSION",
+ "value": "3.2.3"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_URL",
+ "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_SHA1",
+ "value": "92d6d93ef2efc91e595c8bf578bf72baff397507"
+ }
+ ],
+ "labels": {
+ "tier": "backend"
+ },
+ "annotations": {
+ "pod": "podsandbox1"
+ },
+ "readonly_rootfs": false,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "sys_admin"
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_redis_default_mounts.json b/test/testdata/container_redis_default_mounts.json
new file mode 100644
index 000000000..dff3db5a7
--- /dev/null
+++ b/test/testdata/container_redis_default_mounts.json
@@ -0,0 +1,67 @@
+{
+ "metadata": {
+ "name": "podsandbox1-redis"
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "args": [
+ "docker-entrypoint.sh",
+ "redis-server"
+ ],
+ "mounts": [
+ {
+ "container_path": "%CPATH%",
+ "host_path": "%HPATH%"
+ }
+ ],
+ "working_dir": "/data",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "REDIS_VERSION",
+ "value": "3.2.3"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_URL",
+ "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_SHA1",
+ "value": "92d6d93ef2efc91e595c8bf578bf72baff397507"
+ }
+ ],
+ "labels": {
+ "tier": "backend"
+ },
+ "annotations": {
+ "pod": "podsandbox1"
+ },
+ "readonly_rootfs": false,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "sys_admin"
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/container_redis_device.json b/test/testdata/container_redis_device.json
new file mode 100644
index 000000000..2a2495515
--- /dev/null
+++ b/test/testdata/container_redis_device.json
@@ -0,0 +1,68 @@
+{
+ "metadata": {
+ "name": "podsandbox1-redis"
+ },
+ "image": {
+ "image": "redis:alpine"
+ },
+ "args": [
+ "docker-entrypoint.sh",
+ "redis-server"
+ ],
+ "working_dir": "/data",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "REDIS_VERSION",
+ "value": "3.2.3"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_URL",
+ "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz"
+ },
+ {
+ "key": "REDIS_DOWNLOAD_SHA1",
+ "value": "92d6d93ef2efc91e595c8bf578bf72baff397507"
+ }
+ ],
+ "devices": [
+ {
+ "host_path": "/dev/null",
+ "container_path": "/dev/mynull",
+ "permissions": "rwm"
+ }
+ ],
+ "labels": {
+ "tier": "backend"
+ },
+ "annotations": {
+ "pod": "podsandbox1"
+ },
+ "readonly_rootfs": false,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "capabilities": {
+ "add_capabilities": [
+ "sys_admin"
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/fake_ocid_default b/test/testdata/fake_ocid_default
new file mode 100644
index 000000000..915fb17dd
--- /dev/null
+++ b/test/testdata/fake_ocid_default
@@ -0,0 +1 @@
+profile crio-default flags=(attach_disconnected) {}
diff --git a/test/testdata/sandbox_config.json b/test/testdata/sandbox_config.json
new file mode 100644
index 000000000..57e211bd6
--- /dev/null
+++ b/test/testdata/sandbox_config.json
@@ -0,0 +1,51 @@
+{
+ "metadata": {
+ "name": "podsandbox1",
+ "uid": "redhat-test-crio",
+ "namespace": "redhat.test.crio",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_config": {
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng",
+ "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000",
+ "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" ,
+ "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": false,
+ "host_pid": false,
+ "host_ipc": false
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/sandbox_config_hostnet.json b/test/testdata/sandbox_config_hostnet.json
new file mode 100644
index 000000000..99a7560dc
--- /dev/null
+++ b/test/testdata/sandbox_config_hostnet.json
@@ -0,0 +1,48 @@
+{
+ "metadata": {
+ "name": "podsandbox1",
+ "uid": "redhat-test-crio",
+ "namespace": "redhat.test.crio",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_options": {
+ "servers": [
+ "server1.redhat.com",
+ "server2.redhat.com"
+ ],
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng",
+ "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" ,
+ "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": true,
+ "host_pid": false,
+ "host_ipc": false
+ }
+ }
+ }
+}
diff --git a/test/testdata/sandbox_config_hostport.json b/test/testdata/sandbox_config_hostport.json
new file mode 100644
index 000000000..5feda8668
--- /dev/null
+++ b/test/testdata/sandbox_config_hostport.json
@@ -0,0 +1,55 @@
+{
+ "metadata": {
+ "name": "podsandbox1",
+ "uid": "redhat-test-crio",
+ "namespace": "redhat.test.crio",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_options": {
+ "servers": [
+ "server1.redhat.com",
+ "server2.redhat.com"
+ ],
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [
+ {
+ "protocol": 0,
+ "container_port": 80,
+ "host_port": 4888
+ }
+ ],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng",
+ "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000",
+ "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" ,
+ "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": false,
+ "host_pid": false,
+ "host_ipc": false
+ }
+ }
+ }
+}
diff --git a/test/testdata/sandbox_config_seccomp.json b/test/testdata/sandbox_config_seccomp.json
new file mode 100644
index 000000000..8e440b164
--- /dev/null
+++ b/test/testdata/sandbox_config_seccomp.json
@@ -0,0 +1,53 @@
+{
+ "metadata": {
+ "name": "podsandbox1",
+ "uid": "redhat-test-crio",
+ "namespace": "redhat.test.crio",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_options": {
+ "servers": [
+ "server1.redhat.com",
+ "server2.redhat.com"
+ ],
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng"
+ %VALUE%
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": false,
+ "host_pid": false,
+ "host_ipc": false
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}
diff --git a/test/testdata/sandbox_config_selinux.json b/test/testdata/sandbox_config_selinux.json
new file mode 100644
index 000000000..916a10ecd
--- /dev/null
+++ b/test/testdata/sandbox_config_selinux.json
@@ -0,0 +1,48 @@
+{
+ "metadata": {
+ "name": "podsandbox1",
+ "uid": "redhat-test-crio",
+ "namespace": "redhat.test.crio",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_config": {
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng",
+ "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000",
+ "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" ,
+ "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": false,
+ "host_pid": false,
+ "host_ipc": false
+ },
+ "selinux_options": {
+ "level": "s0"
+ }
+ }
+ }
+}
diff --git a/test/testdata/template_container_config.json b/test/testdata/template_container_config.json
new file mode 100644
index 000000000..a770a7c9d
--- /dev/null
+++ b/test/testdata/template_container_config.json
@@ -0,0 +1,68 @@
+{
+ "metadata": {
+ "name": "${NAME}",
+ "attempt": 1
+ },
+ "image": {
+ "image": "${IMAGE}"
+ },
+ "command": ${COMMAND},
+ "args": [],
+ "working_dir": "/",
+ "envs": [
+ {
+ "key": "PATH",
+ "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ },
+ {
+ "key": "TERM",
+ "value": "xterm"
+ },
+ {
+ "key": "TESTDIR",
+ "value": "test/dir1"
+ },
+ {
+ "key": "TESTFILE",
+ "value": "test/file1"
+ }
+ ],
+ "labels": {
+ "type": "small",
+ "batch": "no"
+ },
+ "annotations": {
+ "owner": "dragon",
+ "daemon": "crio"
+ },
+ "privileged": true,
+ "log_path": "",
+ "stdin": false,
+ "stdin_once": false,
+ "tty": false,
+ "linux": {
+ "resources": {
+ "cpu_period": 10000,
+ "cpu_quota": 20000,
+ "cpu_shares": 512,
+ "oom_score_adj": 30
+ },
+ "security_context": {
+ "readonly_rootfs": false,
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ },
+ "capabilities": {
+ "add_capabilities": [
+ "setuid",
+ "setgid"
+ ],
+ "drop_capabilities": [
+ ]
+ }
+ }
+ }
+}
diff --git a/test/testdata/template_sandbox_config.json b/test/testdata/template_sandbox_config.json
new file mode 100644
index 000000000..f43ffb0d6
--- /dev/null
+++ b/test/testdata/template_sandbox_config.json
@@ -0,0 +1,51 @@
+{
+ "metadata": {
+ "name": "${NAME}",
+ "uid": "${CUID}",
+ "namespace": "${NAMESPACE}",
+ "attempt": 1
+ },
+ "hostname": "crioctl_host",
+ "log_directory": "",
+ "dns_config": {
+ "searches": [
+ "8.8.8.8"
+ ]
+ },
+ "port_mappings": [],
+ "resources": {
+ "cpu": {
+ "limits": 3,
+ "requests": 2
+ },
+ "memory": {
+ "limits": 50000000,
+ "requests": 2000000
+ }
+ },
+ "labels": {
+ "group": "test"
+ },
+ "annotations": {
+ "owner": "hmeng",
+ "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000",
+ "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" ,
+ "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
+ },
+ "linux": {
+ "cgroup_parent": "/Burstable/pod_123-456",
+ "security_context": {
+ "namespace_options": {
+ "host_network": false,
+ "host_pid": false,
+ "host_ipc": false
+ },
+ "selinux_options": {
+ "user": "system_u",
+ "role": "system_r",
+ "type": "svirt_lxc_net_t",
+ "level": "s0:c4,c5"
+ }
+ }
+ }
+}