author    Matthew Heon <matthew.heon@pm.me>  2019-05-06 13:44:01 -0400
committer Matthew Heon <matthew.heon@pm.me>  2019-05-06 14:17:54 -0400
commit    faae3a7065980a735ad60ab5f6d9e8421296dbf5 (patch)
tree      ca4e9dcdaaf1684fa446b3d4a48f686d1b86333a /libpod/lock
parent    ff260f07e2be55c7fbda24b8727686fc55c650a6 (diff)
When refreshing after a reboot, force lock allocation
After a reboot, when we refresh Podman's state, we retrieved each lock from the fresh SHM instance, but we did not mark it as allocated, so nothing prevented it from being handed out to other containers and pods.

Provide a method for marking locks as in-use, and use it when we refresh Podman's state after a reboot.

Fixes #2900

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
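For context, a minimal sketch of the call pattern this commit enables during state refresh. The helper name is hypothetical and the import path assumes the repository layout at the time of this commit; the real call site in Podman's runtime is not part of this diff:

```go
package main

import (
	"github.com/containers/libpod/libpod/lock"
)

// refreshContainerLock is a hypothetical illustration of the refresh flow
// this commit enables. After a reboot the SHM segment is recreated empty,
// so the lock ID stored for each container must be re-marked as allocated
// before the container can be used again.
func refreshContainerLock(manager lock.Manager, storedLockID uint32) (lock.Locker, error) {
	// AllocateAndRetrieveLock (added by this commit) both reserves the ID
	// and returns the lock; plain RetrieveLock would return the lock
	// without reserving it, allowing AllocateLock to hand the same ID to
	// a new container or pod.
	return manager.AllocateAndRetrieveLock(storedLockID)
}
```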
Diffstat (limited to 'libpod/lock')
-rw-r--r--  libpod/lock/in_memory_locks.go         | 16
-rw-r--r--  libpod/lock/lock.go                    |  7
-rw-r--r--  libpod/lock/shm/shm_lock.c             | 55
-rw-r--r--  libpod/lock/shm/shm_lock.go            | 17
-rw-r--r--  libpod/lock/shm/shm_lock.h             |  1
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go  | 19
6 files changed, 115 insertions(+), 0 deletions(-)
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index 7c9605917..f3c842f89 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -90,6 +90,22 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+// AllocateAndRetrieveLock allocates a lock with the given ID (if not already in
+// use) and returns it.
+func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ if id >= m.numLocks {
+ return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+ }
+
+ if m.locks[id].allocated {
+ return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
+ }
+
+ m.locks[id].allocated = true
+
+ return m.locks[id], nil
+}
+
// FreeAllLocks frees all locks.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
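A short usage sketch of the new in-memory method, assuming the package's NewInMemoryManager constructor and a Locker that exposes ID():

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// NewInMemoryManager is assumed to be the package's constructor for
	// the in-memory backend.
	manager, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// Explicitly reserve lock 5, as the post-reboot refresh path would.
	l, err := manager.AllocateAndRetrieveLock(5)
	if err != nil {
		panic(err)
	}
	fmt.Println("reserved lock", l.ID())

	// A second allocation of the same ID must fail.
	if _, err := manager.AllocateAndRetrieveLock(5); err != nil {
		fmt.Println("expected error:", err)
	}
}
```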
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index d6841646b..4e1e2e215 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,13 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // AllocateAndRetrieveLock marks the lock with the given ID as in use
+ // and retrieves it.
+ // AllocateAndRetrieveLock will error if the lock in question has
+ // already been allocated.
+ // This is mostly used after a system restart to repopulate the list of
+ // locks in use.
+ AllocateAndRetrieveLock(id uint32) (Locker, error)
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// FreeAllLocks frees all allocated locks, in preparation for lock
// reallocation.
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index d11fce71a..047d3c417 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -354,6 +354,61 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
return -1 * ENOSPC;
}
+// Allocate the semaphore with the given ID.
+// Returns an error if the semaphore with this ID does not exist, or has already
+// been allocated.
+// Returns 0 on success, or negative errno values on failure.
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+ bitmap_t test_map;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+ test_map = 0x1 << index_in_bitmap;
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Check if the semaphore is allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return -1 * EEXIST;
+ }
+
+ // The semaphore is not allocated, allocate it
+ shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap | test_map;
+
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
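The bitmap arithmetic above is the core of the change. A standalone Go sketch of the same logic (minus the SHM segment and mutex), assuming BITMAP_SIZE is 32 as in shm_lock.h:

```go
package main

import "fmt"

// bitmapSize mirrors BITMAP_SIZE from shm_lock.h (assumed to be 32,
// matching the 32-bit bitmap words the C code manipulates).
const bitmapSize = 32

// allocateGiven reproduces the bitmap arithmetic of
// allocate_given_semaphore: locate the word and bit for semIndex, fail if
// the bit is already set, otherwise set it.
func allocateGiven(bitmaps []uint32, semIndex uint32) error {
	bitmapIndex := semIndex / bitmapSize
	indexInBitmap := semIndex % bitmapSize
	testMap := uint32(1) << indexInBitmap

	if bitmaps[bitmapIndex]&testMap != 0 {
		return fmt.Errorf("semaphore %d is already allocated", semIndex)
	}
	bitmaps[bitmapIndex] |= testMap
	return nil
}

func main() {
	bitmaps := make([]uint32, 2)            // room for 64 semaphores
	fmt.Println(allocateGiven(bitmaps, 33)) // <nil>
	fmt.Println(allocateGiven(bitmaps, 33)) // semaphore 33 is already allocated
}
```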
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index e70ea8743..c21e9a221 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -134,6 +134,23 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
return uint32(retCode), nil
}
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid, an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
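A small sketch of the errno convention this wrapper follows, where the C layer reports failures as negative errno values (allocate_given_semaphore returns -EEXIST for an in-use semaphore):

```go
package main

import (
	"fmt"
	"syscall"
)

// toErr mirrors the convention used throughout shm_lock.go: negative
// return codes from the C layer are converted to Go errors with
// syscall.Errno(-1 * retCode).
func toErr(retCode int32) error {
	if retCode < 0 {
		return syscall.Errno(-1 * retCode)
	}
	return nil
}

func main() {
	// Simulate allocate_given_semaphore reporting an in-use semaphore,
	// and show that callers can test for the specific errno.
	err := toErr(-int32(syscall.EEXIST))
	fmt.Println(err, err == syscall.EEXIST) // "file exists true"
}
```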
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 58e4297e2..759f8178a 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -39,6 +39,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 8678958ee..5f31939f8 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -57,6 +57,25 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
return lock, nil
}
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, an error is returned.
+func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ lock := new(SHMLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if id >= m.locks.GetMaxLocks() {
+ return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - maximum lock ID is %d",
+ id, m.locks.GetMaxLocks()-1)
+ }
+
+ if err := m.locks.AllocateGivenSemaphore(id); err != nil {
+ return nil, err
+ }
+
+ return lock, nil
+}
+
// RetrieveLock retrieves a lock from the manager given its ID.
func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
lock := new(SHMLock)
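Taken together, a usage sketch of the SHM-backed manager, assuming the NewSHMLockManager constructor and the Locker's Lock/Unlock/ID methods; the segment name and lock count below are illustrative (Podman derives its own):

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// Create (or reuse) an SHM segment holding 2048 lock semaphores.
	manager, err := lock.NewSHMLockManager("/libpod_example_lock", 2048)
	if err != nil {
		panic(err)
	}

	// Re-reserve lock 42, as the post-reboot refresh would, then use it.
	l, err := manager.AllocateAndRetrieveLock(42)
	if err != nil {
		panic(err)
	}
	l.Lock()
	defer l.Unlock()
	fmt.Println("holding SHM lock", l.ID())
}
```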