author    Matthew Heon <matthew.heon@pm.me>    2019-05-07 21:22:06 -0400
committer Matthew Heon <matthew.heon@pm.me>    2019-05-07 21:28:22 -0400
commit    f5938be1f758abf71f3d91fcb82240220ecfad91 (patch)
tree      0392ff8a9a61303ba82be53dd6226be69bd0313b
parent    ff260f07e2be55c7fbda24b8727686fc55c650a6 (diff)
Improve robustness of pod removal
Removing a pod must first remove all containers in the pod. Libpod requires the state to remain consistent at all times, so all references to a deleted pod must be cleansed first.

Pods can have many containers in them. We presently iterate through all of them, and if an error occurs trying to clean up and remove any single container, we abort the entire operation (though we cannot recover anything already removed - pod removal is not an atomic operation). Because of this, if a removal error occurs partway through, we can end up with a pod in an inconsistent state that is no longer usable. Worse, if the error is in the infra container and is persistent, we get zombie pods that are completely unable to be removed.

When we saw some of these same issues with containers not in pods, we modified the removal code there to aggressively purge containers from the database, then clean up afterwards. Take the same approach here, and make cleanup errors nonfatal: once we've removed the containers, we need to see pod deletion through to the end, so we log errors but keep going.

Also, fix some other small things (most notably, we did not generate events for the removed containers).

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
-rw-r--r--  libpod/runtime_pod_linux.go | 70
1 file changed, 32 insertions(+), 38 deletions(-)
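The ordering the patch adopts (shown in full in the diff below) can be summed up as: evict first, clean up best-effort afterwards. The following is a minimal sketch of that pattern in Go, not libpod's actual API - the container and state interfaces and the removeAllFromState, cleanup, and teardownStorage names are hypothetical stand-ins used only to illustrate where errors stop being fatal.

package podremoval

import (
	"github.com/sirupsen/logrus"
)

// container is a hypothetical stand-in for libpod's container type.
type container interface {
	ID() string
	cleanup() error
	teardownStorage() error
}

// state is a hypothetical stand-in for libpod's database state.
type state interface {
	// removeAllFromState drops every container in the pod from the
	// database in one operation, keeping the state consistent.
	removeAllFromState(podID string) error
}

// removePodContainers evicts the pod's containers from the state first,
// then cleans up after them. Once eviction succeeds, cleanup errors are
// logged rather than returned, so pod removal always runs to completion.
func removePodContainers(db state, podID string, ctrs []container) error {
	// Eviction is the only step still allowed to abort the operation.
	if err := db.removeAllFromState(podID); err != nil {
		return err
	}

	// From here on, log and keep going - the containers are already gone
	// from the state, and a partial failure must not leave a zombie pod.
	for _, ctr := range ctrs {
		if err := ctr.cleanup(); err != nil {
			logrus.Errorf("Unable to clean up container %s: %v", ctr.ID(), err)
		}
		if err := ctr.teardownStorage(); err != nil {
			logrus.Errorf("Unable to tear down container %s storage: %v", ctr.ID(), err)
		}
	}
	return nil
}

The design trade-off is the same one the commit message describes: removal stops being atomic in exchange for never leaving the pod referenced by containers that no longer exist in the database.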
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 0011c771a..6f74acb9d 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -149,10 +149,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
// Go through and lock all containers so we can operate on them all at once
- dependencies := make(map[string][]string)
for _, ctr := range ctrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
+ ctrLock := ctr.lock
+ ctrLock.Lock()
+ defer ctrLock.Unlock()
// Sync all containers
if err := ctr.syncContainer(); err != nil {
@@ -177,23 +177,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if len(ctr.state.ExecSessions) != 0 && !force {
return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s which has active exec sessions", p.ID(), ctr.ID())
}
-
- deps, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return err
- }
- dependencies[ctr.ID()] = deps
}
- // Check if containers have dependencies
- // If they do, and the dependencies are not in the pod, error
- for ctr, deps := range dependencies {
- for _, dep := range deps {
- if _, ok := dependencies[dep]; !ok {
- return errors.Wrapf(ErrCtrExists, "container %s depends on container %s not in pod %s", ctr, dep, p.ID())
- }
- }
- }
+ // We maintain the invariant that container dependencies must all exist
+ // within the container's pod.
+ // No need to check dependencies as such - we're removing all containers
+ // in the pod at once, no dependency issues.
// First loop through all containers and stop them
// Do not remove in this loop to ensure that we don't remove unless all
@@ -220,18 +209,30 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
}
- // Start removing containers
- // We can remove containers even if they have dependencies now
- // As we have guaranteed their dependencies are in the pod
+ // Remove all containers in the pod from the state.
+ if err := r.state.RemovePodContainers(p); err != nil {
+ return err
+ }
+
+ // Clean up after our removed containers.
+ // Errors here are nonfatal - the containers have already been evicted.
+ // We'll do our best to clean up after them, but we have to keep going
+ // and remove the pod as well.
+ // From here until we remove the pod from the state, no error returns.
for _, ctr := range ctrs {
+ // The container no longer exists in the state, mark invalid.
+ ctr.valid = false
+
+ ctr.newContainerEvent(events.Remove)
+
// Clean up network namespace, cgroups, mounts
if err := ctr.cleanup(ctx); err != nil {
- return err
+ logrus.Errorf("Unable to clean up container %s: %v", ctr.ID(), err)
}
// Stop container's storage
if err := ctr.teardownStorage(); err != nil {
- return err
+ logrus.Errorf("Unable to tear down container %s storage: %v", ctr.ID(), err)
}
// Delete the container from runtime (only if we are not
@@ -239,26 +240,16 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if ctr.state.State != ContainerStateConfigured &&
ctr.state.State != ContainerStateExited {
if err := ctr.delete(ctx); err != nil {
- return err
+ logrus.Errorf("Unable to remove container %s from OCI runtime: %v", ctr.ID(), err)
}
}
// Free the container's lock
if err := ctr.lock.Free(); err != nil {
- return err
+ logrus.Errorf("Unable to free container %s lock: %v", ctr.ID(), err)
}
}
- // Remove containers from the state
- if err := r.state.RemovePodContainers(p); err != nil {
- return err
- }
-
- // Mark containers invalid
- for _, ctr := range ctrs {
- ctr.valid = false
- }
-
// Remove pod cgroup, if present
if p.state.CgroupPath != "" {
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
@@ -280,7 +271,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
if err != nil && err != cgroups.ErrCgroupDeleted {
- return err
+ logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
if err == nil {
if err := conmonCgroup.Delete(); err != nil {
@@ -289,7 +280,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath))
if err != nil && err != cgroups.ErrCgroupDeleted {
- return err
+ logrus.Errorf("Error retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
if err == nil {
if err := cgroup.Delete(); err != nil {
@@ -297,7 +288,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
}
default:
- return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
+ // This should be caught much earlier, but let's still
+ // keep going so we make sure to evict the pod before
+ // ending up with an inconsistent state.
+ logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID())
}
}