aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Heon <matthew.heon@gmail.com>2018-04-09 10:51:11 -0400
committerAtomic Bot <atomic-devel@projectatomic.io>2018-04-12 03:48:40 +0000
commit542f8fe98d833ed8b641f91a4d029f35f8128109 (patch)
treefefc080bfa0a4d578e5e59debab1b5c52903c9f9
parent77a1665c05517194c4d5b27b7e03326d4c0923c9 (diff)
downloadpodman-542f8fe98d833ed8b641f91a4d029f35f8128109.tar.gz
podman-542f8fe98d833ed8b641f91a4d029f35f8128109.tar.bz2
podman-542f8fe98d833ed8b641f91a4d029f35f8128109.zip
Do not lock all containers during pod start
This solves a nasty locking issue with getting the path of namespaces for dependencies. Signed-off-by: Matthew Heon <matthew.heon@gmail.com> Closes: #600 Approved by: rhatdan
-rw-r--r--libpod/pod.go43
1 file changed, 29 insertions, 14 deletions
diff --git a/libpod/pod.go b/libpod/pod.go
index 7d9e2c81b..e57782201 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -2,6 +2,7 @@ package libpod
import (
"path/filepath"
+ "strings"
"github.com/containers/storage"
"github.com/docker/docker/pkg/stringid"
@@ -93,16 +94,6 @@ func (p *Pod) Start() (map[string]error, error) {
return nil, err
}
- // We need to lock all the containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
-
- if err := ctr.syncContainer(); err != nil {
- return nil, err
- }
- }
-
// Build a dependency graph of containers in the pod
graph, err := buildContainerGraph(allCtrs)
if err != nil {
@@ -165,10 +156,32 @@ func startNode(node *containerNode, setError bool, ctrErrors map[string]error, c
// Going to start the container, mark us as visited
ctrsVisited[node.id] = true
- // TODO: Maybe have a checkDependenciesRunningLocked here?
- // Graph traversal should ensure our deps have been started, but some
- // might have stopped since?
- // Potentially will hurt our perf, though
+ // Lock before we start
+ node.container.lock.Lock()
+
+ // Check if dependencies are running
+ // Graph traversal means we should have started them
+ // But they could have died before we got here
+ depsStopped, err := node.container.checkDependenciesRunning()
+ if err != nil {
+ node.container.lock.Unlock()
+
+ ctrErrors[node.id] = err
+ for _, successor := range node.dependedOn {
+ startNode(successor, true, ctrErrors, ctrsVisited)
+ }
+ return
+ } else if len(depsStopped) > 0 {
+ node.container.lock.Unlock()
+
+ // Our dependencies are not running
+ depsList := strings.Join(depsStopped, ",")
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ for _, successor := range node.dependedOn {
+ startNode(successor, true, ctrErrors, ctrsVisited)
+ }
+ return
+ }
// Start the container (only if it is not running)
ctrErrored := false
@@ -179,6 +192,8 @@ func startNode(node *containerNode, setError bool, ctrErrors map[string]error, c
}
}
+ node.container.lock.Unlock()
+
// Recurse to anyone who depends on us and start them
for _, successor := range node.dependedOn {
startNode(successor, ctrErrored, ctrErrors, ctrsVisited)