Diffstat (limited to 'libpod')
 -rw-r--r--  libpod/container_internal.go            |   2
 -rw-r--r--  libpod/lock/in_memory_locks.go          |  16
 -rw-r--r--  libpod/lock/lock.go                     |   7
 -rw-r--r--  libpod/lock/shm/shm_lock.c              |  55
 -rw-r--r--  libpod/lock/shm/shm_lock.go             |  17
 -rw-r--r--  libpod/lock/shm/shm_lock.h              |   1
 -rw-r--r--  libpod/lock/shm_lock_manager_linux.go   |  19
 -rw-r--r--  libpod/pod_internal.go                  |   2
 -rw-r--r--  libpod/runtime.go                       |   2
 -rw-r--r--  libpod/runtime_pod_linux.go             | 126

10 files changed, 199 insertions(+), 48 deletions(-)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 0b4e5763e..e6ffaa6d7 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -516,7 +516,7 @@ func (c *Container) refresh() error {
 	}
 
 	// We need to pick up a new lock
-	lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID)
+	lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
 	if err != nil {
 		return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
 	}
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index 7c9605917..f3c842f89 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -90,6 +90,22 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
 	return m.locks[id], nil
 }
 
+// AllocateAndRetrieveLock allocates a lock with the given ID (if not already in
+// use) and returns it.
+func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+	if id >= m.numLocks {
+		return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+	}
+
+	if m.locks[id].allocated {
+		return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
+	}
+
+	m.locks[id].allocated = true
+
+	return m.locks[id], nil
+}
+
 // FreeAllLocks frees all locks.
 // This function is DANGEROUS. Please read the full comment in locks.go before
 // trying to use it.
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index d6841646b..4e1e2e215 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,13 @@ type Manager interface {
 	// The underlying lock MUST be the same as any other lock with the
 	// same UUID.
 	RetrieveLock(id uint32) (Locker, error)
+	// AllocateAndRetrieveLock marks the lock with the given ID as in use
+	// and retrieves it.
+	// AllocateAndRetrieveLock will error if the lock in question has
+	// already been allocated.
+	// This is mostly used after a system restart to repopulate the list of
+	// locks in use.
+	AllocateAndRetrieveLock(id uint32) (Locker, error)
 	// PLEASE READ FULL DESCRIPTION BEFORE USING.
 	// FreeAllLocks frees all allocated locks, in preparation for lock
 	// reallocation.
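For orientation, here is a minimal sketch of the restart flow this new interface method serves. It is not part of the diff, and it assumes the package's NewInMemoryManager constructor, the Locker.ID() accessor, and the pre-rename github.com/containers/libpod import path.

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// Build a small in-memory manager (assumed constructor).
	mgr, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// Normal creation path: the manager picks a free ID, which a
	// container or pod then records in its config as LockID.
	l, err := mgr.AllocateLock()
	if err != nil {
		panic(err)
	}
	savedID := l.ID()

	// Simulated reboot: all in-memory allocations are wiped...
	if err := mgr.FreeAllLocks(); err != nil {
		panic(err)
	}

	// ...so refresh() re-claims the exact ID recorded in the config.
	// Claiming an ID that is already taken fails, which is the point:
	// plain RetrieveLock never marked the ID as in use, so the same
	// lock could later be handed out twice.
	if _, err := mgr.AllocateAndRetrieveLock(savedID); err != nil {
		panic(err)
	}
	fmt.Printf("lock %d re-allocated after refresh\n", savedID)
}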
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index d11fce71a..047d3c417 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -354,6 +354,61 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
   return -1 * ENOSPC;
 }
 
+// Allocate the semaphore with the given ID.
+// Returns an error if the semaphore with this ID does not exist, or has already
+// been allocated.
+// Returns 0 on success, or negative errno values on failure.
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+  int bitmap_index, index_in_bitmap, ret_code;
+  bitmap_t test_map;
+
+  if (shm == NULL) {
+    return -1 * EINVAL;
+  }
+
+  // Check if the lock index is valid
+  if (sem_index >= shm->num_locks) {
+    return -1 * EINVAL;
+  }
+
+  bitmap_index = sem_index / BITMAP_SIZE;
+  index_in_bitmap = sem_index % BITMAP_SIZE;
+
+  // This should never happen if the sem_index test above succeeded, but better
+  // safe than sorry
+  if (bitmap_index >= shm->num_bitmaps) {
+    return -1 * EFAULT;
+  }
+
+  test_map = 0x1 << index_in_bitmap;
+
+  // Lock the mutex controlling access to our shared memory
+  ret_code = take_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  // Check if the semaphore is allocated
+  if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
+    ret_code = release_mutex(&(shm->segment_lock));
+    if (ret_code != 0) {
+      return -1 * ret_code;
+    }
+
+    return -1 * EEXIST;
+  }
+
+  // The semaphore is not allocated, allocate it
+  shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap | test_map;
+
+  ret_code = release_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  return 0;
+}
+
 // Deallocate a given semaphore
 // Returns 0 on success, negative ERRNO values on failure
 int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index e70ea8743..c21e9a221 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -134,6 +134,23 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
 	return uint32(retCode), nil
 }
 
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
+
 // DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
 // reallocated to another container or pod.
 // The given semaphore must be already allocated, or an error will be returned.
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 58e4297e2..759f8178a 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -39,6 +39,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
 shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
 int32_t close_lock_shm(shm_struct_t *shm);
 int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index);
 int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
 int32_t deallocate_all_semaphores(shm_struct_t *shm);
 int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
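Aside: the C helper locates the word and bit for a semaphore with plain div/mod arithmetic. Below is a standalone Go sketch of the same bitmap bookkeeping - not part of the diff, names are hypothetical, and BITMAP_SIZE is assumed to be 32 to match the uint32 bitmaps in the shared-memory segment.

package main

import "fmt"

// bitmapSize mirrors BITMAP_SIZE from shm_lock.h (assumed 32 here:
// one bit per lock in a uint32 bitmap).
const bitmapSize = 32

// allocateGiven mimics allocate_given_semaphore's bookkeeping: claim the
// exact bit for semIndex, failing if it is already set (EEXIST in the C).
func allocateGiven(bitmaps []uint32, semIndex uint32) error {
	bitmapIndex := semIndex / bitmapSize
	indexInBitmap := semIndex % bitmapSize

	if bitmapIndex >= uint32(len(bitmaps)) {
		return fmt.Errorf("semaphore %d out of range", semIndex)
	}

	testMap := uint32(1) << indexInBitmap
	if bitmaps[bitmapIndex]&testMap != 0 {
		return fmt.Errorf("semaphore %d already allocated", semIndex)
	}
	bitmaps[bitmapIndex] |= testMap
	return nil
}

func main() {
	bitmaps := make([]uint32, 2)            // room for 64 locks
	fmt.Println(allocateGiven(bitmaps, 37)) // <nil>: sets bit 5 of word 1
	fmt.Println(allocateGiven(bitmaps, 37)) // error: already allocated
}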
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 8678958ee..5f31939f8 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -57,6 +57,25 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
 	return lock, nil
 }
 
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, an error is returned.
+func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+	lock := new(SHMLock)
+	lock.lockID = id
+	lock.manager = m
+
+	if id >= m.locks.GetMaxLocks() {
+		return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
+			id, m.locks.GetMaxLocks()-1)
+	}
+
+	if err := m.locks.AllocateGivenSemaphore(id); err != nil {
+		return nil, err
+	}
+
+	return lock, nil
+}
+
 // RetrieveLock retrieves a lock from the manager given its ID.
 func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
 	lock := new(SHMLock)
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 25e4e77d7..1fcb5b1a6 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -56,7 +56,7 @@ func (p *Pod) refresh() error {
 	}
 
 	// Retrieve the pod's lock
-	lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+	lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
 	if err != nil {
 		return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
 	}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 34b6ac74f..e6b84014e 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -922,7 +922,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
 	if os.IsNotExist(errors.Cause(err)) {
 		manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "failed to get new shm lock manager")
 		}
 	} else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
 		logrus.Debugf("Number of locks does not match - removing old locks")
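Aside: the runtime.go hunk wraps the constructor error rather than returning it bare. A small sketch, not part of the diff, of why wrapping with pkg/errors (which this tree already uses) stays compatible with the errors.Cause comparison seen in the same function - the failing constructor stub is hypothetical.

package main

import (
	"fmt"
	"syscall"

	"github.com/pkg/errors"
)

// newSHMLockManager stands in for a lock.NewSHMLockManager call that
// fails; the stub and its error value are hypothetical.
func newSHMLockManager() error {
	return syscall.ERANGE
}

func main() {
	err := errors.Wrapf(newSHMLockManager(), "failed to get new shm lock manager")

	// The message now carries context for the user...
	fmt.Println(err)

	// ...while errors.Cause still unwraps to the original errno, so
	// checks like the ERANGE branch in makeRuntime keep working.
	fmt.Println(errors.Cause(err) == syscall.ERANGE) // true
}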
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 0011c771a..5867b1f87 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -149,10 +149,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 	}
 
 	// Go through and lock all containers so we can operate on them all at once
-	dependencies := make(map[string][]string)
 	for _, ctr := range ctrs {
-		ctr.lock.Lock()
-		defer ctr.lock.Unlock()
+		ctrLock := ctr.lock
+		ctrLock.Lock()
+		defer ctrLock.Unlock()
 
 		// Sync all containers
 		if err := ctr.syncContainer(); err != nil {
@@ -177,23 +177,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		if len(ctr.state.ExecSessions) != 0 && !force {
 			return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s which has active exec sessions", p.ID(), ctr.ID())
 		}
-
-		deps, err := r.state.ContainerInUse(ctr)
-		if err != nil {
-			return err
-		}
-		dependencies[ctr.ID()] = deps
 	}
 
-	// Check if containers have dependencies
-	// If they do, and the dependencies are not in the pod, error
-	for ctr, deps := range dependencies {
-		for _, dep := range deps {
-			if _, ok := dependencies[dep]; !ok {
-				return errors.Wrapf(ErrCtrExists, "container %s depends on container %s not in pod %s", ctr, dep, p.ID())
-			}
-		}
-	}
+	// We maintain the invariant that container dependencies must all exist
+	// within the container's pod.
+	// No need to check dependencies here - we are removing all containers
+	// in the pod at once, so no dependency issues can arise.
 
 	// First loop through all containers and stop them
 	// Do not remove in this loop to ensure that we don't remove unless all
@@ -220,18 +209,40 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		}
 	}
 
-	// Start removing containers
-	// We can remove containers even if they have dependencies now
-	// As we have guaranteed their dependencies are in the pod
+	// Remove all containers in the pod from the state.
+	if err := r.state.RemovePodContainers(p); err != nil {
+		return err
+	}
+
+	var removalErr error
+
+	// Clean up after our removed containers.
+	// Errors here are nonfatal - the containers have already been evicted.
+	// We'll do our best to clean up after them, but we have to keep going
+	// and remove the pod as well.
+	// From here until we remove the pod from the state, no error returns.
 	for _, ctr := range ctrs {
+		// The container no longer exists in the state, mark invalid.
+		ctr.valid = false
+
+		ctr.newContainerEvent(events.Remove)
+
 		// Clean up network namespace, cgroups, mounts
 		if err := ctr.cleanup(ctx); err != nil {
-			return err
+			if removalErr == nil {
+				removalErr = err
+			} else {
+				logrus.Errorf("Unable to clean up container %s: %v", ctr.ID(), err)
+			}
 		}
 
 		// Stop container's storage
 		if err := ctr.teardownStorage(); err != nil {
-			return err
+			if removalErr == nil {
+				removalErr = err
+			} else {
+				logrus.Errorf("Unable to tear down container %s storage: %v", ctr.ID(), err)
+			}
 		}
 
 		// Delete the container from runtime (only if we are not
@@ -239,26 +250,24 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		if ctr.state.State != ContainerStateConfigured &&
 			ctr.state.State != ContainerStateExited {
 			if err := ctr.delete(ctx); err != nil {
-				return err
+				if removalErr == nil {
+					removalErr = err
+				} else {
+					logrus.Errorf("Unable to remove container %s from OCI runtime: %v", ctr.ID(), err)
+				}
 			}
 		}
 
 		// Free the container's lock
 		if err := ctr.lock.Free(); err != nil {
-			return err
+			if removalErr == nil {
+				removalErr = errors.Wrapf(err, "error freeing container %s lock", ctr.ID())
+			} else {
+				logrus.Errorf("Unable to free container %s lock: %v", ctr.ID(), err)
+			}
 		}
 	}
 
-	// Remove containers from the state
-	if err := r.state.RemovePodContainers(p); err != nil {
-		return err
-	}
-
-	// Mark containers invalid
-	for _, ctr := range ctrs {
-		ctr.valid = false
-	}
-
 	// Remove pod cgroup, if present
 	if p.state.CgroupPath != "" {
 		logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
@@ -266,10 +275,11 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		switch p.runtime.config.CgroupManager {
 		case SystemdCgroupsManager:
 			if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
-				// The pod is already almost gone.
-				// No point in hard-failing if we fail
-				// this bit of cleanup.
- logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID()) + } else { + logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) + } } case CgroupfsCgroupsManager: // Delete the cgroupfs cgroup @@ -280,34 +290,60 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath)) if err != nil && err != cgroups.ErrCgroupDeleted { - return err + if removalErr == nil { + removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID()) + } else { + logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) + } } if err == nil { if err := conmonCgroup.Delete(); err != nil { - logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID()) + } else { + logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) + } } } cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath)) if err != nil && err != cgroups.ErrCgroupDeleted { - return err + if removalErr == nil { + removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID()) + } else { + logrus.Errorf("Error retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) + } } if err == nil { if err := cgroup.Delete(); err != nil { - logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID()) + } else { + logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) + } } } default: - return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager) + // This should be caught much earlier, but let's still + // keep going so we make sure to evict the pod before + // ending up with an inconsistent state. + if removalErr == nil { + removalErr = errors.Wrapf(ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID()) + } else { + logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID()) + } } } // Remove pod from state if err := r.state.RemovePod(p); err != nil { + if removalErr != nil { + logrus.Errorf("%v", removalErr) + } return err } // Mark pod invalid p.valid = false p.newPodEvent(events.Remove) - return nil + return removalErr } |