package parallel

import (
	"context"
	"sync"

	"github.com/containers/podman/v2/libpod"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
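
// jobControl and jobControlLock are package-level variables declared
// elsewhere in this package. A minimal sketch of what those declarations
// might look like, assuming a weighted semaphore (e.g. from
// golang.org/x/sync/semaphore) is used to cap concurrency:
//
//	var (
//		// jobControl limits how many jobs may run at once.
//		jobControl *semaphore.Weighted
//		// jobControlLock guards jobControl against being replaced
//		// while jobs are running.
//		jobControlLock sync.RWMutex
//	)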
// ContainerOp performs the given function on the given set of
// containers, using a number of parallel threads.
// If no error is returned, each container in ctrs will have an entry in the
// resulting map; the entry for a container whose applyFunc succeeded is nil.
func ContainerOp(ctx context.Context, ctrs []*libpod.Container, applyFunc func(*libpod.Container) error) (map[*libpod.Container]error, error) {
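	// Hold a read lock for the duration of the call; presumably this
	// keeps the job-control semaphore from being reconfigured while
	// jobs are still in flight.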
	jobControlLock.RLock()
	defer jobControlLock.RUnlock()

	// We could use a sync.Map but given Go's lack of generics I'd rather
	// just use a lock on a normal map...
	// The expectation is that most of the time is spent in applyFunc
	// anyways.
	var (
		errMap  = make(map[*libpod.Container]error)
		errLock sync.Mutex
		allDone sync.WaitGroup
	)

	for _, ctr := range ctrs {
		// Block until a thread is available
		if err := jobControl.Acquire(ctx, 1); err != nil {
			return nil, errors.Wrapf(err, "error acquiring job control semaphore")
		}

		allDone.Add(1)
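
		// Copy the loop variable so each goroutine operates on its own
		// container rather than on whatever ctr points to by the time
		// the goroutine runs (loop variables are reused per iteration
		// in Go versions before 1.22).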
		c := ctr
		go func() {
			logrus.Debugf("Launching job on container %s", c.ID())

			err := applyFunc(c)
			errLock.Lock()
			errMap[c] = err
			errLock.Unlock()

			allDone.Done()
			jobControl.Release(1)
		}()
	}

	allDone.Wait()

	return errMap, nil
}

// TODO: Add an Enqueue() function that returns a promise
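
// Example usage (a minimal sketch, not taken from this package's tests):
// stop a set of containers in parallel and collect per-container errors.
// It assumes jobControl has already been initialized, e.g. by an
// initializer such as SetMaxThreads elsewhere in the package, and that
// libpod.Container exposes a Stop method.
//
//	errMap, err := parallel.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
//		return c.Stop()
//	})
//	if err != nil {
//		return err
//	}
//	for ctr, ctrErr := range errMap {
//		if ctrErr != nil {
//			logrus.Errorf("Job failed on container %s: %v", ctr.ID(), ctrErr)
//		}
//	}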