author     Matthew Heon <matthew.heon@pm.me>  2020-06-04 14:28:01 -0400
committer  Matthew Heon <matthew.heon@pm.me>  2020-06-05 11:31:05 -0400
commit     89a1e7db39ed1015762d733379a4a5d443b1f4de (patch)
tree       1b99dd8c457c10b96110d86c091a71d13ca3df1d /pkg
parent     bf8337b3fc488d3fc449a7622ae7744a67b9f348 (diff)
download   podman-89a1e7db39ed1015762d733379a4a5d443b1f4de.tar.gz
           podman-89a1e7db39ed1015762d733379a4a5d443b1f4de.tar.bz2
           podman-89a1e7db39ed1015762d733379a4a5d443b1f4de.zip
Add parallel execution code for container operations
This code will run container operations in parallel, up to a given maximum
number of threads. Currently, it has only been enabled for local `podman rm`
as a proof of concept.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
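As a rough sketch of how the new package is meant to be consumed end to end (a hypothetical caller for illustration only; in this commit just the local `podman rm` path shown below is actually wired up):

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/parallel"
)

// removeAll is a hypothetical driver: set the worker limit once, then fan the
// per-container closure out over the batch and read back the per-container
// error map.
func removeAll(ctx context.Context, rt *libpod.Runtime, ctrs []*libpod.Container) error {
	// Normally taken from a CLI flag or config; 4 is an arbitrary example value.
	if err := parallel.SetMaxThreads(4); err != nil {
		return err
	}

	errMap, err := parallel.ParallelContainerOp(ctx, ctrs, func(c *libpod.Container) error {
		// force=false, removeVolumes=false, matching the signature used in
		// ContainerRm below.
		return rt.RemoveContainer(ctx, c, false, false)
	})
	if err != nil {
		return err
	}

	// Every container gets exactly one entry; nil means it was removed cleanly.
	for ctr, rmErr := range errMap {
		if rmErr != nil {
			fmt.Printf("failed to remove %s: %v\n", ctr.ID(), rmErr)
		}
	}
	return nil
}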
Diffstat (limited to 'pkg')
-rw-r--r--   pkg/domain/infra/abi/containers.go   21
-rw-r--r--   pkg/parallel/parallel.go             44
-rw-r--r--   pkg/parallel/parallel_linux.go       57
3 files changed, 114 insertions(+), 8 deletions(-)
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 19232eff1..eb45d4630 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -23,6 +23,7 @@ import (
"github.com/containers/libpod/pkg/checkpoint"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/domain/infra/abi/terminal"
+ "github.com/containers/libpod/pkg/parallel"
"github.com/containers/libpod/pkg/ps"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/signal"
@@ -321,21 +322,25 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return reports, nil
}
- for _, c := range ctrs {
- report := entities.RmReport{Id: c.ID()}
+ errMap, err := parallel.ParallelContainerOp(ctx, ctrs, func(c *libpod.Container) error {
err := ic.Libpod.RemoveContainer(ctx, c, options.Force, options.Volumes)
if err != nil {
if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
- reports = append(reports, &report)
- continue
+ return nil
}
logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
- report.Err = err
- reports = append(reports, &report)
- continue
}
- reports = append(reports, &report)
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+ for ctr, err := range errMap {
+ report := new(entities.RmReport)
+ report.Id = ctr.ID()
+ report.Err = err
+ reports = append(reports, report)
}
return reports, nil
}
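A point worth noting in the rewrite above: per-container failures travel back in the returned map, while a non-nil top-level error from ParallelContainerOp means the batch itself could not be dispatched (for example, the semaphore acquire was cancelled). A minimal sketch exercising that contract with a no-op job (hypothetical helper, not part of this commit):

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/parallel"
)

// checkBatchContract illustrates the documented behavior of ParallelContainerOp:
// when the top-level error is nil, every input container has an entry in the
// map, and a nil value marks success.
func checkBatchContract(ctx context.Context, ctrs []*libpod.Container) error {
	errMap, err := parallel.ParallelContainerOp(ctx, ctrs, func(c *libpod.Container) error {
		return nil // no-op job, used only to observe the result map
	})
	if err != nil {
		return err
	}
	if len(errMap) != len(ctrs) {
		return fmt.Errorf("expected %d entries, got %d", len(ctrs), len(errMap))
	}
	for ctr, opErr := range errMap {
		if opErr != nil {
			return fmt.Errorf("container %s unexpectedly failed: %v", ctr.ID(), opErr)
		}
	}
	return nil
}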
diff --git a/pkg/parallel/parallel.go b/pkg/parallel/parallel.go
new file mode 100644
index 000000000..c9e4da50d
--- /dev/null
+++ b/pkg/parallel/parallel.go
@@ -0,0 +1,44 @@
+package parallel
+
+import (
+ "sync"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+var (
+ // Maximum number of jobs that will be used.
+ // Set a low, but non-zero, default. We'll be overriding it by default
+ // anyways.
+ numThreads uint = 8
+ // Semaphore to control thread creation and ensure numThreads is
+ // respected.
+ jobControl *semaphore.Weighted
+ // Lock to control changing the semaphore - we don't want to do it
+ // while anyone is using it.
+ jobControlLock sync.RWMutex
+)
+
+// SetMaxThreads sets the number of threads that will be used for parallel jobs.
+func SetMaxThreads(threads uint) error {
+ if threads == 0 {
+ return errors.New("must give a non-zero number of threads to execute with")
+ }
+
+ jobControlLock.Lock()
+ defer jobControlLock.Unlock()
+
+ numThreads = threads
+ jobControl = semaphore.NewWeighted(int64(threads))
+ logrus.Infof("Setting parallel job count to %d", threads)
+
+ return nil
+}
+
+// GetMaxThreads returns the current number of threads that will be used for
+// parallel jobs.
+func GetMaxThreads() uint {
+ return numThreads
+}
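SetMaxThreads takes the write lock, while ParallelContainerOp (next file) holds the read lock for the duration of a batch, so the semaphore is only ever replaced when no jobs are in flight. A stripped-down sketch of that locking pattern on its own (generic names, my assumptions, not podman code):

package example

import (
	"context"
	"sync"

	"golang.org/x/sync/semaphore"
)

var (
	limitLock sync.RWMutex
	limit     = semaphore.NewWeighted(8) // mirrors the default of numThreads above
)

// SetLimit swaps in a new semaphore; the write lock waits out any running batch.
func SetLimit(n int64) {
	limitLock.Lock()
	defer limitLock.Unlock()
	limit = semaphore.NewWeighted(n)
}

// RunBounded runs fn for every item, with at most the configured number in
// flight at once. The read lock keeps SetLimit out until the batch finishes.
func RunBounded(ctx context.Context, items []string, fn func(string)) error {
	limitLock.RLock()
	defer limitLock.RUnlock()

	var wg sync.WaitGroup
	for _, item := range items {
		if err := limit.Acquire(ctx, 1); err != nil {
			return err
		}
		wg.Add(1)
		item := item
		go func() {
			defer wg.Done()
			defer limit.Release(1)
			fn(item)
		}()
	}
	wg.Wait()
	return nil
}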
diff --git a/pkg/parallel/parallel_linux.go b/pkg/parallel/parallel_linux.go
new file mode 100644
index 000000000..e3f086c0e
--- /dev/null
+++ b/pkg/parallel/parallel_linux.go
@@ -0,0 +1,57 @@
+package parallel
+
+import (
+ "context"
+ "sync"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// ParallelContainerOp performs the given function on the given set of
+// containers, using a number of parallel threads.
+// If no error is returned, each container specified in ctrs will have an entry
+// in the resulting map; containers with no error will be set to nil.
+func ParallelContainerOp(ctx context.Context, ctrs []*libpod.Container, applyFunc func(*libpod.Container) error) (map[*libpod.Container]error, error) {
+ jobControlLock.RLock()
+ defer jobControlLock.RUnlock()
+
+ // We could use a sync.Map but given Go's lack of generics I'd rather
+ // just use a lock on a normal map...
+ // The expectation is that most of the time is spent in applyFunc
+ // anyways.
+ var (
+ errMap map[*libpod.Container]error = make(map[*libpod.Container]error)
+ errLock sync.Mutex
+ allDone sync.WaitGroup
+ )
+
+ for _, ctr := range ctrs {
+ // Block until a thread is available
+ if err := jobControl.Acquire(ctx, 1); err != nil {
+ return nil, errors.Wrapf(err, "error acquiring job control semaphore")
+ }
+
+ allDone.Add(1)
+
+ c := ctr
+ go func() {
+ logrus.Debugf("Launching job on container %s", c.ID())
+
+ err := applyFunc(c)
+ errLock.Lock()
+ errMap[c] = err
+ errLock.Unlock()
+
+ allDone.Done()
+ jobControl.Release(1)
+ }()
+ }
+
+ allDone.Wait()
+
+ return errMap, nil
+}
+
+// TODO: Add an Enqueue() function that returns a promise
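One possible shape for the Enqueue() promise mentioned in the TODO, assuming it would live in the same pkg/parallel package and reuse the jobControl semaphore above (purely speculative; none of this is in the commit):

// Enqueue schedules fn against the job-control semaphore and immediately
// returns a buffered channel that will receive the single result once the
// job has run. Hypothetical sketch only.
func Enqueue(ctx context.Context, ctr *libpod.Container, fn func(*libpod.Container) error) <-chan error {
	retChan := make(chan error, 1)

	go func() {
		jobControlLock.RLock()
		defer jobControlLock.RUnlock()

		if err := jobControl.Acquire(ctx, 1); err != nil {
			retChan <- err
			return
		}
		defer jobControl.Release(1)

		retChan <- fn(ctr)
	}()

	return retChan
}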