aboutsummaryrefslogtreecommitdiff
path: root/cmd/podman/shared/workers.go
blob: a9d6bb77e025b869cde44cfd9a214834eef09d74 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
package shared

import (
	"reflect"
	"runtime"
	"strings"
	"sync"

	"github.com/sirupsen/logrus"
)

// JobFunc provides the function signature for the pool'ed functions.
// A nil return marks the job as successful; a non-nil error lands the
// job in the failure map returned by Pool.Run.
type JobFunc func() error

// Job defines the function to run
type Job struct {
	// ID identifies the job in Run's results (ok list / failure map)
	// and in debug log messages.
	ID string
	// Fn is the work to execute; its error return becomes JobResult.Err.
	Fn JobFunc
}

// JobResult defines the results from the function ran
type JobResult struct {
	Job Job   // the job that was executed
	Err error // error returned by Job.Fn; nil on success
}

// Pool defines the worker pool and queues
type Pool struct {
	id       string          // label used in debug log messages
	wg       *sync.WaitGroup // tracks queued-but-unfinished jobs (Add increments, workers decrement)
	jobs     chan Job        // buffered queue of pending jobs; closed by Run
	results  chan JobResult  // buffered queue of completed-job outcomes; closed by Run
	size     int             // number of worker goroutines Run starts
	capacity int             // buffer size of both the jobs and results channels
}

// NewPool creates and initializes a new Pool. id labels the pool in debug
// logs, size is the requested number of workers, and capacity bounds both
// the jobs and results channels. The worker count is clamped to capacity,
// since more workers than queueable jobs cannot do useful work.
func NewPool(id string, size int, capacity int) *Pool {
	// min(size, capacity) for ints.
	workers := size
	if workers > capacity {
		workers = capacity
	}

	// Keyed fields so this literal survives any reordering of Pool's
	// field declarations.
	return &Pool{
		id:       id,
		wg:       new(sync.WaitGroup),
		jobs:     make(chan Job, capacity),
		results:  make(chan JobResult, capacity),
		size:     workers,
		capacity: capacity,
	}
}

// Add Job to pool for parallel processing.
// The WaitGroup increment must happen before the channel send so that
// Run's Wait cannot observe a zero count while work is still queued.
// The send blocks if the buffered jobs channel is already at capacity
// and no worker is draining it yet.
func (p *Pool) Add(job Job) {
	p.wg.Add(1)
	p.jobs <- job
}

// Run the Job's in the pool, gather and return results: the IDs of jobs
// that succeeded, and a map from job ID to error for those that failed.
// The final error return is always nil and is kept for call-site
// compatibility. All jobs must have been queued via Add before Run is
// called: Run closes the jobs channel immediately.
func (p *Pool) Run() ([]string, map[string]error, error) {
	var (
		ok       = []string{}
		failures = map[string]error{}
	)

	// w is passed as a plain call argument (not captured by a closure),
	// so no per-iteration shadow copy is needed.
	for w := 0; w < p.size; w++ {
		go p.newWorker(w)
	}
	// Workers exit once the closed jobs channel is drained.
	close(p.jobs)
	p.wg.Wait()

	// Every worker has finished, so closing results is safe and lets
	// the range below terminate.
	close(p.results)
	for r := range p.results {
		if r.Err == nil {
			ok = append(ok, r.Job.ID)
		} else {
			failures[r.Job.ID] = r.Err
		}
	}

	if logrus.GetLevel() == logrus.DebugLevel {
		for i, f := range failures {
			logrus.Debugf("Pool[%s, %s: %s]", p.id, i, f.Error())
		}
	}

	return ok, failures, nil
}

// newWorker pulls jobs off the pool's jobs channel until it is closed,
// runs each job, reports the outcome on the results channel, and marks
// the job done on the WaitGroup. slot is only used for debug logging.
func (p *Pool) newWorker(slot int) {
	for job := range p.jobs {
		err := job.Fn()
		p.results <- JobResult{job, err}
		if logrus.GetLevel() == logrus.DebugLevel {
			name := runtime.FuncForPC(reflect.ValueOf(job.Fn).Pointer()).Name()
			// Log only the last dot-separated segment of the function
			// name. Indexing a fixed offset (the previous n[2:]) panics
			// when the name has fewer than two segments, and printing a
			// []string with %s yields a bracketed slice, not a name.
			n := strings.Split(name, ".")
			logrus.Debugf("Worker#%d finished job %s/%s (%v)", slot, n[len(n)-1], job.ID, err)
		}
		p.wg.Done()
	}
}

// DefaultPoolSize provides the maximum number of parallel workers (int) as calculated by a basic
// heuristic. This can be overridden by the --max-workers primary switch to podman.
func DefaultPoolSize(name string) int {
	cpus := runtime.NumCPU()

	switch name {
	case "init", "kill", "pause", "rm", "unpause":
		if cpus <= 3 {
			return cpus * 3
		}
		return cpus * 4
	case "ps":
		return 8
	case "restart":
		return cpus * 2
	case "stop":
		if cpus <= 2 {
			return 4
		}
		return cpus * 3
	default:
		return 3
	}
}