author     Valentin Rothberg <vrothberg@redhat.com>  2022-04-11 13:32:23 +0200
committer  Valentin Rothberg <vrothberg@redhat.com>  2022-04-20 11:31:28 +0200
commit     0162f678c0e68e9ef0756f8cf521cf14d637be29 (patch)
tree       f0af58bd32bca3c6b77002d4da85c28b2f108765 /test
parent     bc8d8737b760a1f42a9208c6067d2e163817d5f0 (diff)
benchmarking Podman: proof of concept
Add a proof of concept for benchmarking Podman. The benchmarks are
implemented by means of the end-to-end test suite but hidden behind a
`benchmarks` build tag. Running `make localbenchmarks` will run
`test/e2e` with that build tag and set ginkgo's "focus" to the
"Podman Benchmark Suite", so only this spec runs and all others are
skipped. Before terminating, ginkgo prints a report listing the CPU
and memory stats for each benchmark.

New benchmarks can easily be added via the `newBenchmark` function,
which also supports setting an `init()` function on each benchmark to
perform benchmark-specific setup. For instance, benchmarking
`podman start` requires creating a container beforehand.

Podman may be called more than once in the main function of a
benchmark, but note that the displayed memory consumption is then the
sum of all Podman invocations. The memory consumption is collected
via `/usr/bin/time`.

A benchmark's report is split into CPU and memory stats as displayed
below:

```
[CPU] podman images:
  Fastest Time: 0.146s
  Slowest Time: 0.187s
  Average Time: 0.180s ± 0.015s

[MEM] podman images:
  Smallest: 41892.0KB
  Largest:  42792.0KB
  Average:  42380.7KB ± 286.4KB
```

Note that the benchmarks are not wired into CI yet; they are meant as
a proof of concept. More benchmarks and the plumbing into CI will
happen in a later change.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
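For illustration, here is how a hypothetical additional benchmark could be queued via the `newBenchmark` pattern introduced in the diff below. The benchmark name, the `bar` container, and the use of `top` to keep it running are assumptions for this sketch, not part of the commit:

```go
// Hypothetical sketch: benchmarking `podman stop` needs a running
// container, so the init() function starts one beforehand.
newBenchmark("podman stop", func() {
	session := podmanTest.Podman([]string{"stop", "bar"})
	session.WaitWithDefaultTimeout()
	Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{
	init: func() {
		session := podmanTest.Podman([]string{"run", "-d", "--name=bar", ALPINE, "top"})
		session.WaitWithDefaultTimeout()
		Expect(session).Should(Exit(0))
	},
})
```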
Diffstat (limited to 'test')
-rw-r--r--  test/e2e/benchmarks_test.go | 173
-rw-r--r--  test/utils/utils.go         |  13
2 files changed, 186 insertions(+), 0 deletions(-)
diff --git a/test/e2e/benchmarks_test.go b/test/e2e/benchmarks_test.go
new file mode 100644
index 000000000..c631b06ee
--- /dev/null
+++ b/test/e2e/benchmarks_test.go
@@ -0,0 +1,173 @@
+//go:build benchmarks
+// +build benchmarks
+
+package integration
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+
+ . "github.com/containers/podman/v4/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
+)
+
+var (
+ // Number of times to execute each benchmark.
+ numBenchmarkSamples = 3
+ // All benchmarks are queued here.
+ allBenchmarks []benchmark
+)
+
+// An internal struct for queuing benchmarks.
+type benchmark struct {
+ // The name of the benchmark.
+ name string
+ // The function to execute.
+ main func()
+ // Function is run before `main`.
+ init func()
+}
+
+// Allows for customizing the benchmark in an easy-to-extend way.
+type newBenchmarkOptions struct {
+ // Sets the benchmark's init function.
+ init func()
+}
+
+// Queue a new benchmark.
+func newBenchmark(name string, main func(), options *newBenchmarkOptions) {
+ bm := benchmark{name: name, main: main}
+ if options != nil {
+ bm.init = options.init
+ }
+ allBenchmarks = append(allBenchmarks, bm)
+}
+
+var _ = Describe("Podman Benchmark Suite", func() {
+ var (
+ timedir string
+ podmanTest *PodmanTestIntegration
+ )
+
+ setup := func() {
+ tempdir, err := CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+
+ timedir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ }
+
+ cleanup := func() {
+ podmanTest.Cleanup()
+ os.RemoveAll(timedir)
+ }
+
+ totalMemoryInKb := func() (total uint64) {
+ files, err := ioutil.ReadDir(timedir)
+ if err != nil {
+ Fail(fmt.Sprintf("Error reading timing dir: %v", err))
+ }
+
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ raw, err := ioutil.ReadFile(path.Join(timedir, f.Name()))
+ if err != nil {
+ Fail(fmt.Sprintf("Error reading timing file: %v", err))
+ }
+ rawS := strings.TrimSuffix(string(raw), "\n")
+ number, err := strconv.ParseUint(rawS, 10, 64)
+ if err != nil {
+ Fail(fmt.Sprintf("Error converting timing file to numeric value: %v", err))
+ }
+ total += number
+ }
+
+ return total
+ }
+
+ // Make sure to clean up after the benchmarks.
+ AfterEach(func() {
+ cleanup()
+ })
+
+ // All benchmarks are executed here to have *one* table listing all data.
+ Measure("Podman Benchmark Suite", func(b Benchmarker) {
+ for i := range allBenchmarks {
+ setup()
+ bm := allBenchmarks[i]
+ if bm.init != nil {
+ bm.init()
+ }
+
+ // Set the time dir only for the main() function.
+ os.Setenv(EnvTimeDir, timedir)
+ b.Time("[CPU] "+bm.name, bm.main)
+ os.Unsetenv(EnvTimeDir)
+
+ mem := totalMemoryInKb()
+ b.RecordValueWithPrecision("[MEM] "+bm.name, float64(mem), "KB", 1)
+ cleanup()
+ }
+ }, numBenchmarkSamples)
+
+ BeforeEach(func() {
+
+ // --------------------------------------------------------------------------
+ // IMAGE BENCHMARKS
+ // --------------------------------------------------------------------------
+
+ newBenchmark("podman images", func() {
+ session := podmanTest.Podman([]string{"images"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ newBenchmark("podman pull", func() {
+ session := podmanTest.Podman([]string{"pull", "quay.io/libpod/cirros"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ // --------------------------------------------------------------------------
+ // CONTAINER BENCHMARKS
+ // --------------------------------------------------------------------------
+
+ newBenchmark("podman create", func() {
+ session := podmanTest.Podman([]string{"create", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+
+ newBenchmark("podman start", func() {
+ session := podmanTest.Podman([]string{"start", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, &newBenchmarkOptions{
+ init: func() {
+ session := podmanTest.Podman([]string{"create", "--name=foo", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ },
+ })
+
+ newBenchmark("podman run", func() {
+ session := podmanTest.Podman([]string{"run", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, nil)
+ })
+})
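As the commit message notes, a benchmark's main function may call Podman more than once; since every invocation made while `EnvTimeDir` is set writes its own `%M` file, `totalMemoryInKb` then reports the sum of the peak RSS of all invocations. A hypothetical two-invocation benchmark (the name and the `localhost/foo:latest` tag are assumptions) would look like:

```go
// Hypothetical: both invocations run while EnvTimeDir is set, so each
// writes its own time file and the [MEM] result is their sum.
newBenchmark("podman tag + untag", func() {
	session := podmanTest.Podman([]string{"tag", ALPINE, "localhost/foo:latest"})
	session.WaitWithDefaultTimeout()
	Expect(session).Should(Exit(0))

	session = podmanTest.Podman([]string{"untag", ALPINE, "localhost/foo:latest"})
	session.WaitWithDefaultTimeout()
	Expect(session).Should(Exit(0))
}, nil)
```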
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 57f002130..da56a3a2e 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -27,6 +27,8 @@ const (
CNI NetworkBackend = iota
// Netavark network backend
Netavark NetworkBackend = iota
+ // Env variable for creating time files.
+ EnvTimeDir = "_PODMAN_TIME_DIR"
)

func (n NetworkBackend) ToString() string {
@@ -96,6 +98,17 @@ func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, cwd string
if p.RemoteTest {
podmanBinary = p.RemotePodmanBinary
}
+
+ if timeDir := os.Getenv(EnvTimeDir); timeDir != "" {
+ timeFile, err := ioutil.TempFile(timeDir, ".time")
+ if err != nil {
+ Fail(fmt.Sprintf("Error creating time file: %v", err))
+ }
+ timeArgs := []string{"-f", "%M", "-o", timeFile.Name()}
+ timeCmd := append([]string{"/usr/bin/time"}, timeArgs...)
+ wrapper = append(timeCmd, wrapper...)
+ }
+
runCmd := append(wrapper, podmanBinary)
if p.NetworkBackend == Netavark {
runCmd = append(runCmd, []string{"--network-backend", "netavark"}...)
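The wrapper above prepends `/usr/bin/time -f %M -o <timefile>` to each Podman invocation, so GNU time records the process's maximum resident set size (in kilobytes) into a fresh file under the time directory. A minimal standalone sketch of that mechanism, assuming GNU time is installed at `/usr/bin/time` on a Linux host:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	// Scratch directory standing in for the suite's time dir.
	timeDir, err := os.MkdirTemp("", "podman-bench")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(timeDir)

	// GNU time writes the wrapped command's peak RSS in kilobytes
	// (%M) to the file passed via -o, mirroring the wrapper above.
	timeFile := filepath.Join(timeDir, "sample.time")
	cmd := exec.Command("/usr/bin/time", "-f", "%M", "-o", timeFile, "true")
	if err := cmd.Run(); err != nil {
		panic(err)
	}

	raw, err := os.ReadFile(timeFile)
	if err != nil {
		panic(err)
	}
	fmt.Printf("peak RSS: %sKB\n", strings.TrimSpace(string(raw)))
}
```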