author     Matthew Heon <matthew.heon@gmail.com>    2018-04-27 16:06:02 -0400
committer  Matthew Heon <matthew.heon@pm.me>        2019-01-04 09:45:59 -0500
commit     27cebb780be4142afe6356cbbc57775a33e8e55e (patch)
tree       bdca2e5f96f7ff64c3e4cf9c8c46383a230dc9a8 /libpod/lock/shm_lock.c
parent     6868b5aa1444404113bc6a4582203fbbf89490c2 (diff)
Initial skeleton of in-memory locks
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'libpod/lock/shm_lock.c')
-rw-r--r--  libpod/lock/shm_lock.c  340
1 file changed, 340 insertions, 0 deletions
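
The companion header shm_lock.h is not included in this diff. For orientation, here is a minimal sketch of the declarations it would need to provide, inferred purely from the macros, types, and fields used in shm_lock.c below; the concrete values chosen for SHM_NAME and MAGIC are assumptions, not taken from the commit.

/* Hypothetical sketch of shm_lock.h, inferred from shm_lock.c.
 * The values of SHM_NAME and MAGIC below are placeholders. */
#ifndef shm_locks_h_
#define shm_locks_h_

#include <semaphore.h>
#include <stdint.h>

#define BITMAP_SIZE 32                  /* semaphores per lock group */
#define SHM_NAME    "/libpod_lock"      /* assumed POSIX SHM object name */
#define MAGIC       0x87D1              /* assumed magic; any constant works */

typedef uint32_t bitmap_t;

/* One group: a 32-bit allocation bitmap plus its 32 semaphores */
typedef struct lock_group {
  bitmap_t bitmap;
  sem_t    locks[BITMAP_SIZE];
} lock_group_t;

/* Header of the shared segment, followed by a flexible array of groups;
 * compute_shm_size() in shm_lock.c depends on exactly this layout. */
typedef struct shm_struct {
  uint32_t     magic;
  sem_t        segment_lock;            /* protects the bitmaps */
  uint32_t     num_bitmaps;
  uint32_t     num_locks;
  lock_group_t locks[];
} shm_struct_t;

size_t        compute_shm_size(uint32_t num_bitmaps);
shm_struct_t *setup_lock_shm(uint32_t num_locks);
shm_struct_t *open_lock_shm(uint32_t num_locks);
int32_t       close_lock_shm(shm_struct_t *shm);
int64_t       allocate_semaphore(shm_struct_t *shm);
int32_t       deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t       lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t       unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);

#endif

Note that compute_shm_size() relies on locks[] being a flexible array member, so the segment is laid out as one shm_struct_t header followed by num_bitmaps lock groups back to back.
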
diff --git a/libpod/lock/shm_lock.c b/libpod/lock/shm_lock.c
new file mode 100644
index 000000000..ab715891c
--- /dev/null
+++ b/libpod/lock/shm_lock.c
@@ -0,0 +1,340 @@
+#include <errno.h>
+#include <fcntl.h>
+#include <semaphore.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "shm_lock.h"
+
+// Compute the size of the SHM struct
+size_t compute_shm_size(uint32_t num_bitmaps) {
+ return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
+}
+
+// Set up an SHM segment holding locks for libpod
+// num_locks must be a multiple of BITMAP_SIZE (32 by default)
+// Returns a valid pointer on success or NULL on error
+shm_struct_t *setup_lock_shm(uint32_t num_locks) {
+ int shm_fd, i, j, ret_code;
+ uint32_t num_bitmaps;
+ size_t shm_size;
+ shm_struct_t *shm;
+
+ // TODO maybe set errno so we can get errors back to libpod?
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ if (num_locks % BITMAP_SIZE != 0) {
+ // Number of locks not a multiple of BITMAP_SIZE
+ return NULL;
+ }
+ num_bitmaps = num_locks / BITMAP_SIZE;
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ // Create a new SHM segment for us
+ shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (shm_fd < 0) {
+ return NULL;
+ }
+
+ // Increase its size to what we need
+ ret_code = ftruncate(shm_fd, shm_size);
+ if (ret_code < 0) {
+ goto CLEANUP_UNLINK;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (shm == MAP_FAILED) {
+ goto CLEANUP_UNLINK;
+ }
+
+ // We have successfully mapped the memory, now initialize the region
+ shm->magic = MAGIC;
+ shm->num_locks = num_locks;
+ shm->num_bitmaps = num_bitmaps;
+
+ // Initialize the semaphore that protects the bitmaps
+ ret_code = sem_init(&(shm->segment_lock), 1, 1);
+ if (ret_code < 0) {
+ goto CLEANUP_UNMAP;
+ }
+
+ // Initialize all bitmaps to 0 initially
+ // And initialize all semaphores they use
+ for (i = 0; i < num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ for (j = 0; j < BITMAP_SIZE; j++) {
+ ret_code = sem_init(&(shm->locks[i].locks[j]), 1, 1);
+ if (ret_code < 0) {
+ goto CLEANUP_UNMAP;
+ }
+ }
+ }
+
+ // Close the file descriptor, we're done with it
+ // Ignore errors, it's ok if we leak a single FD and this should only run once
+ close(shm_fd);
+
+ return shm;
+
+ // Cleanup after an error
+ CLEANUP_UNMAP:
+ munmap(shm, shm_size);
+ CLEANUP_UNLINK:
+ close(shm_fd);
+ shm_unlink(SHM_NAME);
+ return NULL;
+}
+
+// Open an existing SHM segment holding libpod locks
+// num_locks is the number of locks that will be configured in the SHM segment
+// num_locks must be a multiple of BITMAP_SIZE (32 by default)
+// Returns a valid pointer on success or NULL on error
+shm_struct_t *open_lock_shm(uint32_t num_locks) {
+ int shm_fd;
+ shm_struct_t *shm;
+ size_t shm_size;
+ uint32_t num_bitmaps;
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ if (num_locks % BITMAP_SIZE != 0) {
+ // Number of locks not a multiple of BITMAP_SIZE
+ return NULL;
+ }
+ num_bitmaps = num_locks / BITMAP_SIZE;
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ shm_fd = shm_open(SHM_NAME, O_RDWR, 0600);
+ if (shm_fd < 0) {
+ return NULL;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+
+ // Ignore errors, it's ok if we leak a single FD since this only runs once
+ close(shm_fd);
+
+ // Check if we successfully mmap'd
+ if (shm == MAP_FAILED) {
+ return NULL;
+ }
+
+ // Need to check the SHM to see if it's actually our locks
+ if (shm->magic != MAGIC) {
+ goto CLEANUP;
+ }
+ if (shm->num_locks != num_locks) {
+ goto CLEANUP;
+ }
+
+ return shm;
+
+ CLEANUP:
+ munmap(shm, shm_size);
+ return NULL;
+}
+
+// Close an open SHM lock struct, unmapping the backing memory.
+// The given shm_struct_t will be rendered unusable as a result.
+// On success, 0 is returned. On failure, negative ERRNO values are returned.
+int32_t close_lock_shm(shm_struct_t *shm) {
+ int ret_code;
+ size_t shm_size;
+
+ // We can't unmap null...
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ shm_size = compute_shm_size(shm->num_bitmaps);
+
+ ret_code = munmap(shm, shm_size);
+
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
+
+// Allocate the first available semaphore
+// Returns a non-negative integer guaranteed to be less than UINT32_MAX on success,
+// or negative errno values on failure
+// On success, the returned integer is the number of the semaphore allocated
+int64_t allocate_semaphore(shm_struct_t *shm) {
+ int ret_code, i;
+ bitmap_t test_map;
+ int64_t sem_number, num_within_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the semaphore controlling access to our shared memory
+ do {
+ ret_code = sem_wait(&(shm->segment_lock));
+ } while(ret_code == -1 && errno == EINTR); // retry if interrupted by a signal
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ // Loop through our bitmaps to search for one that is not full
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ if (shm->locks[i].bitmap != 0xFFFFFFFF) {
+ test_map = 0x1;
+ num_within_bitmap = 0;
+ while (test_map != 0) {
+ if ((test_map & shm->locks[i].bitmap) == 0) {
+ // Compute the number of the semaphore we are allocating
+ sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
+ // OR in the bitmap
+ shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;
+ // Clear the semaphore
+ sem_post(&(shm->segment_lock));
+ // Return the semaphore we've allocated
+ return sem_number;
+ }
+ test_map = test_map << 1;
+ num_within_bitmap++;
+ }
+ // We should never fall through this loop
+ // TODO maybe an assert() here to panic if we do?
+ }
+ }
+
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ // All bitmaps are full
+ // We have no available semaphores, report allocation failure
+ return -1 * ENOSPC;
+}
+
+// Deallocate a given semaphore
+// Returns 0 on success, negative ERRNO values on failure
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ bitmap_t test_map;
+ int bitmap_index, index_in_bitmap, ret_code, i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+ test_map = 0x1;
+ for (i = 0; i < index_in_bitmap; i++) {
+ test_map = test_map << 1;
+ }
+
+ // Lock the semaphore controlling access to our shared memory
+ do {
+ ret_code = sem_wait(&(shm->segment_lock));
+ } while(ret_code == -1 && errno == EINTR); // retry if interrupted by a signal
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ // Check if the semaphore is allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ return -1 * ENOENT;
+ }
+
+ // The semaphore is allocated, clear it
+ // Invert the bitmask we used to test to clear the bit
+ test_map = ~test_map;
+ shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;
+
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ return 0;
+}
+
+// Lock a given semaphore
+// Does not check if the semaphore is allocated - this ensures that, even for
+// removed containers, we can still successfully lock to check status (and
+// subsequently realize they have been removed).
+// Returns 0 on success, negative ERRNO values on failure
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // Lock the semaphore controlling access to our shared memory
+ do {
+ ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+ } while(ret_code == -1 && errno == EINTR); // retry if interrupted by a signal
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
+
+// Unlock a given semaphore
+// Does not check if the semaphore is allocated - this mirrors lock_semaphore()
+// above, so that even semaphores belonging to removed containers can still be
+// unlocked safely after a status check.
+// Returns 0 on success, negative ERRNO values on failure
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+
+ return 0;
+}
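
As a rough illustration of how a caller is expected to drive this API (this driver is not part of the commit; in libpod these calls are made from Go via cgo), the sequence is: set up or open the segment, allocate a semaphore, lock and unlock it as needed, then deallocate it and unmap the segment. A minimal C sketch, assuming the header layout sketched above and 32 locks:

/* Hypothetical usage sketch; error handling abbreviated. */
#include <stdio.h>
#include "shm_lock.h"

int main(void) {
  /* num_locks must be a multiple of BITMAP_SIZE */
  shm_struct_t *shm = setup_lock_shm(32);
  if (shm == NULL) {
    /* The segment may already exist from a previous run; try opening it */
    shm = open_lock_shm(32);
  }
  if (shm == NULL) {
    fprintf(stderr, "could not set up or open lock segment\n");
    return 1;
  }

  int64_t sem = allocate_semaphore(shm);     /* e.g. 0 on a fresh segment */
  if (sem < 0) {
    fprintf(stderr, "allocation failed: errno %lld\n", (long long)(-sem));
    return 1;
  }

  lock_semaphore(shm, (uint32_t)sem);        /* critical section would go here */
  unlock_semaphore(shm, (uint32_t)sem);

  deallocate_semaphore(shm, (uint32_t)sem);  /* return the slot to the bitmap */
  close_lock_shm(shm);                       /* unmap our view of the segment */
  return 0;
}

Two points worth noting: close_lock_shm() only unmaps the caller's mapping, and the only shm_unlink() in this file is on setup_lock_shm()'s error path, so the named segment and its allocation state persist until something removes it explicitly. Depending on the libc version, linking may also require -lrt and -lpthread for the shm_open() and sem_*() calls.
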