path: root/test/e2e
author    Valentin Rothberg <vrothberg@redhat.com>    2022-04-11 13:32:23 +0200
committer Valentin Rothberg <vrothberg@redhat.com>    2022-04-20 11:31:28 +0200
commit    0162f678c0e68e9ef0756f8cf521cf14d637be29 (patch)
tree      f0af58bd32bca3c6b77002d4da85c28b2f108765 /test/e2e
parent    bc8d8737b760a1f42a9208c6067d2e163817d5f0 (diff)
download  podman-0162f678c0e68e9ef0756f8cf521cf14d637be29.tar.gz
          podman-0162f678c0e68e9ef0756f8cf521cf14d637be29.tar.bz2
          podman-0162f678c0e68e9ef0756f8cf521cf14d637be29.zip
benchmarking Podman: proof of concept
Add a proof of concept for benchmarking Podman. The benchmarks are implemented by means of the end-to-end test suite but hidden behind a `benchmarks` build tag.

Running `make localbenchmarks` will run `test/e2e` with that build tag and set ginkgo's "focus" to "Podman Benchmark Suite" so that only this spec runs and all others are skipped. ginkgo prints a report before terminating, listing the CPU and memory stats for each benchmark.

New benchmarks can easily be added via the `newBenchmark` function, which also supports attaching an `init()` function to a benchmark for performing setup specific to it. For instance, benchmarking `podman start` requires creating a container beforehand.

Podman may be called more than once in the main function of a benchmark, but note that the displayed memory consumption is then the sum of all Podman invocations. The memory consumption is collected via `/usr/bin/time`.

A benchmark's report is split into CPU and memory as displayed below:

```
[CPU] podman images:
  Fastest Time: 0.146s
  Slowest Time: 0.187s
  Average Time: 0.180s ± 0.015s

[MEM] podman images:
  Smallest: 41892.0KB
  Largest:  42792.0KB
  Average:  42380.7KB ± 286.4KB
```

Note that the benchmarks are not wired into CI yet; they are meant as a proof of concept. More benchmarks and the plumbing into CI will happen in a later change.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
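For illustration only (not part of this commit): registering an additional benchmark with a setup step would follow the same pattern as the `podman start` benchmark in the diff below. The `podman stop` benchmark and the container name `bench-ctr` are hypothetical; the sketch assumes it sits inside the suite's `BeforeEach` block, where `podmanTest`, `ALPINE`, and the Gomega helpers are already in scope.

```
// Hypothetical example: benchmark "podman stop", which needs a running
// container created in init() before main() is timed.
newBenchmark("podman stop", func() {
	session := podmanTest.Podman([]string{"stop", "bench-ctr"})
	session.WaitWithDefaultTimeout()
	Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{
	init: func() {
		// The init() hook runs before the timed main(), so starting the
		// container does not count towards the benchmark's CPU or memory stats.
		session := podmanTest.Podman([]string{"run", "-d", "--name=bench-ctr", ALPINE, "top"})
		session.WaitWithDefaultTimeout()
		Expect(session).Should(Exit(0))
	},
})
```

Once registered this way, the benchmark would get its own `[CPU]` and `[MEM]` rows in the final ginkgo report, like the `podman images` example above.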
Diffstat (limited to 'test/e2e')
-rw-r--r--  test/e2e/benchmarks_test.go  173
1 file changed, 173 insertions, 0 deletions
diff --git a/test/e2e/benchmarks_test.go b/test/e2e/benchmarks_test.go
new file mode 100644
index 000000000..c631b06ee
--- /dev/null
+++ b/test/e2e/benchmarks_test.go
@@ -0,0 +1,173 @@
+//go:build benchmarks
+// +build benchmarks
+
+package integration
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+
+ . "github.com/containers/podman/v4/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
+)
+
+var (
+ // Number of times to execute each benchmark.
+ numBenchmarkSamples = 3
+ // All benchmarks are queued here.
+ allBenchmarks []benchmark
+)
+
+// An internal struct for queuing benchmarks.
+type benchmark struct {
+ // The name of the benchmark.
+ name string
+ // The function to execute.
+ main func()
+ // Function is run before `main`.
+ init func()
+}
+
+ // Allows for customizing the benchmark in an easy-to-extend way.
+type newBenchmarkOptions struct {
+ // Sets the benchmark's init function.
+ init func()
+}
+
+// Queue a new benchmark.
+func newBenchmark(name string, main func(), options *newBenchmarkOptions) {
+ bm := benchmark{name: name, main: main}
+ if options != nil {
+ bm.init = options.init
+ }
+ allBenchmarks = append(allBenchmarks, bm)
+}
+
+var _ = Describe("Podman Benchmark Suite", func() {
+ var (
+ timedir string
+ podmanTest *PodmanTestIntegration
+ )
+
+ setup := func() {
+ tempdir, err := CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+
+ timedir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ }
+
+ cleanup := func() {
+ podmanTest.Cleanup()
+ os.RemoveAll(timedir)
+ }
+
+ totalMemoryInKb := func() (total uint64) {
+ files, err := ioutil.ReadDir(timedir)
+ if err != nil {
+ Fail(fmt.Sprintf("Error reading timing dir: %v", err))
+ }
+
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ raw, err := ioutil.ReadFile(path.Join(timedir, f.Name()))
+ if err != nil {
+ Fail(fmt.Sprintf("Error reading timing file: %v", err))
+ }
+ rawS := strings.TrimSuffix(string(raw), "\n")
+ number, err := strconv.ParseUint(rawS, 10, 64)
+ if err != nil {
+ Fail(fmt.Sprintf("Error converting timing file to numeric value: %v", err))
+ }
+ total += number
+ }
+
+ return total
+ }
+
+ // Make sure to clean up after the benchmarks.
+ AfterEach(func() {
+ cleanup()
+ })
+
+ // All benchmarks are executed here to have *one* table listing all data.
+ Measure("Podman Benchmark Suite", func(b Benchmarker) {
+ for i := range allBenchmarks {
+ setup()
+ bm := allBenchmarks[i]
+ if bm.init != nil {
+ bm.init()
+ }
+
+ // Set the time dir only for the main() function.
+ os.Setenv(EnvTimeDir, timedir)
+ b.Time("[CPU] "+bm.name, bm.main)
+ os.Unsetenv(EnvTimeDir)
+
+ mem := totalMemoryInKb()
+ b.RecordValueWithPrecision("[MEM] "+bm.name, float64(mem), "KB", 1)
+ cleanup()
+ }
+ }, numBenchmarkSamples)
+
+ BeforeEach(func() {
+
+ // --------------------------------------------------------------------------
+ // IMAGE BENCHMARKS
+ // --------------------------------------------------------------------------
+
+ newBenchmark("podman images", func() {
+ session := podmanTest.Podman([]string{"images"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ newBenchmark("podman pull", func() {
+ session := podmanTest.Podman([]string{"pull", "quay.io/libpod/cirros"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ // --------------------------------------------------------------------------
+ // CONTAINER BENCHMARKS
+ // --------------------------------------------------------------------------
+
+ newBenchmark("podman create", func() {
+ session := podmanTest.Podman([]string{"run", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ newBenchmark("podman start", func() {
+ session := podmanTest.Podman([]string{"start", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, &newBenchmarkOptions{
+ init: func() {
+ session := podmanTest.Podman([]string{"create", "--name=foo", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ },
+ })
+
+ newBenchmark("podman run", func() {
+ session := podmanTest.Podman([]string{"run", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+ })
+})
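One caveat from the commit message is worth spelling out: when a benchmark's main() invokes Podman several times, the reported `[MEM]` value is the sum over all invocations, because `totalMemoryInKb` adds up one value per file in the timing directory (one file per Podman invocation). Below is a minimal, self-contained sketch of that aggregation, assuming each timing file holds a single peak-RSS value in KB; the file names and values are made up for illustration.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

func main() {
	timedir, err := os.MkdirTemp("", "timedir")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(timedir)

	// Two hypothetical Podman invocations within a single benchmark main(),
	// each recording its peak RSS (in KB) in its own timing file.
	_ = os.WriteFile(filepath.Join(timedir, "invocation-1"), []byte("41892\n"), 0o600)
	_ = os.WriteFile(filepath.Join(timedir, "invocation-2"), []byte("42792\n"), 0o600)

	entries, err := os.ReadDir(timedir)
	if err != nil {
		panic(err)
	}
	var total uint64
	for _, e := range entries {
		raw, err := os.ReadFile(filepath.Join(timedir, e.Name()))
		if err != nil {
			panic(err)
		}
		n, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
		if err != nil {
			panic(err)
		}
		total += n
	}
	// The [MEM] figure recorded for the benchmark is this sum,
	// not a per-invocation value.
	fmt.Printf("reported memory: %dKB\n", total)
}
```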