author     Matthew Heon <matthew.heon@gmail.com>        2018-04-09 11:05:59 -0400
committer  Atomic Bot <atomic-devel@projectatomic.io>   2018-04-12 03:48:40 +0000
commit     8b67fbb3f23b72497d56e8902f56e46a690a77da (patch)
tree       65febe6552df5b725a492a35640a0abbb60cfab9 /libpod/pod.go
parent     542f8fe98d833ed8b641f91a4d029f35f8128109 (diff)
Make pod stop lock one container at a time
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Closes: #600
Approved by: rhatdan
Diffstat (limited to 'libpod/pod.go')
-rw-r--r--   libpod/pod.go   55
1 file changed, 29 insertions(+), 26 deletions(-)
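
The diff below drops the old approach of locking (and deferring the unlock of) every container before stopping any of them, and instead locks each container only for as long as it takes to sync and stop it. As a rough illustration of that pattern, here is a minimal, self-contained Go sketch; Container, syncContainer, and stop are simplified stand-ins for this example, not the real libpod types:

// Sketch of the per-container locking pattern adopted by Pod.Stop in this
// commit: lock, sync, skip if not running, stop, then unlock before moving
// on to the next container. Types and methods are hypothetical stand-ins.
package main

import (
	"fmt"
	"sync"
)

type Container struct {
	id      string
	lock    sync.Mutex
	running bool
}

// syncContainer stands in for refreshing container state from storage.
func (c *Container) syncContainer() error { return nil }

// stop stands in for stopping the container with its configured timeout.
func (c *Container) stop() error {
	c.running = false
	return nil
}

// stopAll locks one container at a time instead of locking the whole set
// up front, collecting per-container errors as it goes.
func stopAll(ctrs []*Container) map[string]error {
	ctrErrors := make(map[string]error)

	for _, ctr := range ctrs {
		ctr.lock.Lock()

		if err := ctr.syncContainer(); err != nil {
			ctr.lock.Unlock()
			ctrErrors[ctr.id] = err
			continue
		}

		// Ignore containers that are not running.
		if !ctr.running {
			ctr.lock.Unlock()
			continue
		}

		if err := ctr.stop(); err != nil {
			ctrErrors[ctr.id] = err
		}

		ctr.lock.Unlock()
	}

	return ctrErrors
}

func main() {
	ctrs := []*Container{
		{id: "a", running: true},
		{id: "b", running: false},
	}
	fmt.Println("errors:", stopAll(ctrs))
}

Because each lock is released before the loop advances, the stop operation no longer holds every container lock at once, which is the "one container at a time" behavior the commit title describes.
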
diff --git a/libpod/pod.go b/libpod/pod.go
index e57782201..e6a2ba3dc 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -153,39 +153,40 @@ func startNode(node *containerNode, setError bool, ctrErrors map[string]error, c
return
}
- // Going to start the container, mark us as visited
+ // Going to try to start the container, mark us as visited
ctrsVisited[node.id] = true
- // Lock before we start
- node.container.lock.Lock()
+ ctrErrored := false
// Check if dependencies are running
// Graph traversal means we should have started them
// But they could have died before we got here
+ // Does not require that the container be locked, we only need to lock
+ // the dependencies
depsStopped, err := node.container.checkDependenciesRunning()
if err != nil {
- node.container.lock.Unlock()
-
ctrErrors[node.id] = err
- for _, successor := range node.dependedOn {
- startNode(successor, true, ctrErrors, ctrsVisited)
- }
- return
+ ctrErrored = true
} else if len(depsStopped) > 0 {
- node.container.lock.Unlock()
-
// Our dependencies are not running
depsList := strings.Join(depsStopped, ",")
ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
- for _, successor := range node.dependedOn {
- startNode(successor, true, ctrErrors, ctrsVisited)
+ ctrErrored = true
+ }
+
+ // Lock before we start
+ node.container.lock.Lock()
+
+ // Sync the container to pick up current state
+ if !ctrErrored {
+ if err := node.container.syncContainer(); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
}
- return
}
// Start the container (only if it is not running)
- ctrErrored := false
- if node.container.state.State != ContainerStateRunning {
+ if !ctrErrored && node.container.state.State != ContainerStateRunning {
if err := node.container.initAndStart(); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
@@ -230,16 +231,6 @@ func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
return nil, err
}
- // We need to lock all the containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
-
- if err := ctr.syncContainer(); err != nil {
- return nil, err
- }
- }
-
ctrErrors := make(map[string]error)
// TODO: There may be cases where it makes sense to order stops based on
@@ -247,12 +238,22 @@ func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
// Stop to all containers
for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
// Ignore containers that are not running
if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
continue
}
if err := ctr.stop(ctr.config.StopTimeout); err != nil {
+ ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
}
@@ -262,6 +263,8 @@ func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
ctrErrors[ctr.ID()] = err
}
}
+
+ ctr.lock.Unlock()
}
if len(ctrErrors) > 0 {