author     Paul Holzinger <pholzing@redhat.com>  2021-11-05 00:12:27 +0100
committer  Paul Holzinger <pholzing@redhat.com>  2021-11-05 00:20:10 +0100
commit     614c6f59708090d3cde868891c3bb8e0e667e086 (patch)
tree       43453416be7dd9a6f7b1dbb0bf6f1112151d62c9 /libpod
parent     28bbd146853166463c060fa93714330fe63e1040 (diff)
Fix rootless cni netns cleanup logic
The check whether cleanup is needed reads all containers and looks for running containers with bridge networking. If we do not find any, we have to clean up the netns. However, there was a problem with this: the state is empty by default, so the running check never worked. Fortunately there was a second check which relies on the CNI files, so we still did the cleanup anyway. With netavark I noticed that this check is broken because the CNI files are not present.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
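To make the bug concrete, here is a minimal, self-contained Go sketch (not podman's actual types; Container, ContainerStatus and syncContainer are simplified stand-ins) showing why a filter that reads an unsynced state can never report a container as running: the zero value of the state field only changes once the state is loaded from the database.

package main

import "fmt"

// Simplified stand-ins for libpod's container state handling.
type ContainerStatus int

const (
	StateUnknown ContainerStatus = iota // zero value: state never loaded
	StateRunning
)

type Container struct {
	state ContainerStatus // empty until synced from the database
}

// syncContainer mimics refreshing the in-memory state from the database.
func (c *Container) syncContainer(fromDB ContainerStatus) { c.state = fromDB }

func main() {
	c := &Container{} // freshly constructed, state never synced

	// Old check: reads the state directly; the zero value is never
	// "running", so the filter returns false for every container.
	fmt.Println("without sync:", c.state == StateRunning) // false

	// Fixed check: sync first, then the comparison reflects reality.
	c.syncContainer(StateRunning)
	fmt.Println("after sync:  ", c.state == StateRunning) // true
}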
Diffstat (limited to 'libpod')
-rw-r--r--  libpod/networking_linux.go  28
1 file changed, 20 insertions, 8 deletions
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index e792a410c..bf527a81a 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -314,8 +314,25 @@ func (r *RootlessCNI) Cleanup(runtime *Runtime) error {
r.Lock.Lock()
defer r.Lock.Unlock()
running := func(c *Container) bool {
+ // no bridge => no need to check
+ if !c.config.NetMode.IsBridge() {
+ return false
+ }
+
// we cannot use c.state() because it will try to lock the container
- // using c.state.State directly should be good enough for this use case
+ // locking is a problem because cleanup is called after net teardown;
+ // at this stage the container is already locked.
+ // also do not try to only lock containers which are not currently in net
+ // teardown because this will result in an ABBA deadlock between the rootless
+ // cni lock and the container lock.
+ // because we need to get the state we have to sync, otherwise this will not
+ // work since the state is empty by default.
+ // I do not like this but I do not see a better way at the moment
+ err := c.syncContainer()
+ if err != nil {
+ return false
+ }
+
state := c.state.State
return state == define.ContainerStateRunning
}
@@ -323,13 +340,8 @@ func (r *RootlessCNI) Cleanup(runtime *Runtime) error {
if err != nil {
return err
}
- cleanup := true
- for _, ctr := range ctrs {
- if ctr.config.NetMode.IsBridge() {
- cleanup = false
- }
- }
- if cleanup {
+ // cleanup if we found no containers
+ if len(ctrs) == 0 {
// make sure the cni results (cache) dir is empty
// libpod instances with another root dir are not covered by the check above
// this allows several libpod instances to use the same rootless cni ns
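
The ABBA deadlock that the new comment warns about is the classic problem of two code paths taking the same pair of locks in opposite order; this is why the cleanup path avoids acquiring container locks at all. Below is a generic Go sketch of the pattern being avoided (not podman code; the lock names are illustrative only):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var cniLock, ctrLock sync.Mutex // stand-ins for the rootless cni lock and a container lock

	// Path A (network cleanup): takes the cni lock, then wants the container lock.
	go func() {
		cniLock.Lock()
		defer cniLock.Unlock()
		time.Sleep(10 * time.Millisecond) // widen the race window
		ctrLock.Lock()                    // blocks forever: B already holds ctrLock
		defer ctrLock.Unlock()
	}()

	// Path B (container teardown): takes the container lock, then wants the cni lock.
	go func() {
		ctrLock.Lock()
		defer ctrLock.Unlock()
		time.Sleep(10 * time.Millisecond)
		cniLock.Lock() // blocks forever: A already holds cniLock -> ABBA deadlock
		defer cniLock.Unlock()
	}()

	time.Sleep(time.Second)
	fmt.Println("both goroutines are now stuck; a consistent lock order would prevent this")
}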