Diffstat (limited to 'libpod/lock/shm')
-rw-r--r--  libpod/lock/shm/shm_lock.c  | 55
-rw-r--r--  libpod/lock/shm/shm_lock.go | 17
-rw-r--r--  libpod/lock/shm/shm_lock.h  |  1
3 files changed, 73 insertions(+), 0 deletions(-)
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index d11fce71a..047d3c417 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -354,6 +354,61 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
return -1 * ENOSPC;
}
+// Allocate the semaphore with the given index.
+// Returns an error if the index is out of range or the semaphore is already
+// allocated.
+// Returns 0 on success, or negative errno values on failure.
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+ bitmap_t test_map;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+  // Cast before shifting: 0x1 is a signed int, and shifting into bit 31
+  // of an int is undefined behavior
+  test_map = (bitmap_t)0x1 << index_in_bitmap;
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+  // Error out if the semaphore is already allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return -1 * EEXIST;
+ }
+
+  // The semaphore is not allocated; claim it
+  shm->locks[bitmap_index].bitmap |= test_map;
+
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
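
The bitmap bookkeeping above splits a flat semaphore index into a word index
and a bit offset within that word. A minimal Go sketch of the same arithmetic,
assuming the 32-bit bitmap_t this file uses (the names and the demo index are
illustrative, not part of the patch):

package main

import "fmt"

// bitmapSize mirrors BITMAP_SIZE in shm_lock.c: bits per bitmap word.
// 32 is an assumption matching a uint32 bitmap_t.
const bitmapSize = 32

func main() {
	semIndex := uint32(37) // hypothetical semaphore index

	bitmapIndex := semIndex / bitmapSize   // which bitmap word holds the bit
	indexInBitmap := semIndex % bitmapSize // which bit within that word
	testMap := uint32(1) << indexInBitmap  // single-bit mask for that slot

	// A set bit in (bitmap & testMap) means the semaphore is allocated.
	fmt.Printf("sem %d -> word %d, bit %d, mask %#010x\n",
		semIndex, bitmapIndex, indexInBitmap, testMap)
}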
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index e70ea8743..c21e9a221 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -134,6 +134,23 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
return uint32(retCode), nil
}
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid, an error will
+// be returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
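
On the Go side, a caller that needs to re-reserve a specific lock index (for
example, when restoring known lock assignments after a restart) would pair
this with the package's open helper. A hedged sketch; the OpenSHMLock
signature, import path, and Close behavior are assumptions based on the
surrounding package, not part of this patch:

package shmdemo

import (
	"github.com/containers/libpod/libpod/lock/shm" // import path assumed
)

// reserveLock reopens an existing lock segment and claims a previously
// recorded semaphore index. Sketch only: OpenSHMLock is assumed to mirror
// the C open_lock_shm(path, num_locks, ...) shown in this change.
func reserveLock(path string, numLocks, sem uint32) (*shm.SHMLocks, error) {
	locks, err := shm.OpenSHMLock(path, numLocks)
	if err != nil {
		return nil, err
	}

	// AllocateGivenSemaphore fails with EEXIST if the index is already
	// taken and EINVAL if it is out of range or the segment was closed.
	if err := locks.AllocateGivenSemaphore(sem); err != nil {
		locks.Close() // best-effort cleanup; Close assumed to unmap the segment
		return nil, err
	}
	return locks, nil
}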
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 58e4297e2..759f8178a 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -39,6 +39,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);