author:    Matthew Heon <matthew.heon@gmail.com>  2018-08-08 15:50:16 -0400
committer: Matthew Heon <matthew.heon@pm.me>      2019-01-04 09:45:59 -0500
commit:    a21f21efa19372e055e8f1daf2e77c52e5352ccc
tree:      1cefa8e72f8d318ff47dd4925d486cd3081295d4 /libpod/lock/shm
parent:    3ed81051e814e688f7b4ae1bbc7c4d4c3fbd7d0f
Refactor locks package to build on non-Linux
Move SHM-specific code into a subpackage. Within the main locks package, move the manager to be Linux-only and add a non-Linux unsupported build file.

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
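The non-Linux build file itself sits in the parent locks package and is outside this diff; a minimal sketch of the build-tag pattern it describes, with hypothetical names not taken from the commit, might look like:

    // +build !linux

    package lock

    import "errors"

    // errUnsupported and NewSHMLockManager are hypothetical names; the real
    // stub's identifiers are not shown in this diff.
    var errUnsupported = errors.New("SHM locks are only supported on Linux")

    // NewSHMLockManager compiles on every platform but always fails at
    // runtime on non-Linux systems, keeping the rest of the package portable.
    func NewSHMLockManager(numLocks uint32) (interface{}, error) {
        return nil, errUnsupported
    }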
Diffstat (limited to 'libpod/lock/shm')
-rw-r--r--  libpod/lock/shm/shm_lock.c        383
-rw-r--r--  libpod/lock/shm/shm_lock.go       188
-rw-r--r--  libpod/lock/shm/shm_lock.h         43
-rw-r--r--  libpod/lock/shm/shm_lock_test.go  243
4 files changed, 857 insertions, 0 deletions
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
new file mode 100644
index 000000000..3fe41f63c
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.c
@@ -0,0 +1,383 @@
+#include <errno.h>
+#include <fcntl.h>
+#include <semaphore.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "shm_lock.h"
+
+// Compute the size of the SHM struct
+size_t compute_shm_size(uint32_t num_bitmaps) {
+ return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
+}
+
+// Set up an SHM segment holding locks for libpod.
+// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// Returns a valid pointer on success or NULL on error.
+// If an error occurs, it will be written to the int pointed to by error_code.
+shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
+ int shm_fd, i, j, ret_code;
+ uint32_t num_bitmaps;
+ size_t shm_size;
+ shm_struct_t *shm;
+
+ // If error_code doesn't point to anything, we can't reasonably return errors
+ // So fail immediately
+ if (error_code == NULL) {
+ return NULL;
+ }
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ *error_code = EINVAL;
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ if (num_locks % BITMAP_SIZE != 0) {
+ // Number of locks not a multiple of BITMAP_SIZE
+ *error_code = EINVAL;
+ return NULL;
+ }
+ num_bitmaps = num_locks / BITMAP_SIZE;
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ // Create a new SHM segment for us
+ shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (shm_fd < 0) {
+ *error_code = errno;
+ return NULL;
+ }
+
+ // Increase its size to what we need
+ ret_code = ftruncate(shm_fd, shm_size);
+ if (ret_code < 0) {
+ *error_code = errno;
+ goto CLEANUP_UNLINK;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (shm == MAP_FAILED) {
+ *error_code = errno;
+ goto CLEANUP_UNLINK;
+ }
+
+ // We have successfully mapped the memory, now initialize the region
+ shm->magic = MAGIC;
+ shm->num_locks = num_locks;
+ shm->num_bitmaps = num_bitmaps;
+
+ // Initialize the semaphore that protects the bitmaps.
+ // Initialize to value 1, as we're a mutex, and set pshared as this will be
+ // shared between processes in an SHM.
+ ret_code = sem_init(&(shm->segment_lock), true, 1);
+ if (ret_code < 0) {
+ *error_code = errno;
+ goto CLEANUP_UNMAP;
+ }
+
+ // Initialize all bitmaps to 0 initially
+ // And initialize all semaphores they use
+ for (i = 0; i < num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ for (j = 0; j < BITMAP_SIZE; j++) {
+ // As above, initialize to 1 to act as a mutex, and set pshared as we'll
+ // be living in an SHM.
+ ret_code = sem_init(&(shm->locks[i].locks[j]), true, 1);
+ if (ret_code < 0) {
+ *error_code = errno;
+ goto CLEANUP_UNMAP;
+ }
+ }
+ }
+
+ // Close the file descriptor, we're done with it
+ // Ignore errors, it's ok if we leak a single FD and this should only run once
+ close(shm_fd);
+
+ return shm;
+
+ // Cleanup after an error
+ CLEANUP_UNMAP:
+ munmap(shm, shm_size);
+ CLEANUP_UNLINK:
+ close(shm_fd);
+ shm_unlink(SHM_NAME);
+ return NULL;
+}
+
+// Open an existing SHM segment holding libpod locks.
+// num_locks is the number of locks that will be configured in the SHM segment.
+// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// Returns a valid pointer on success or NULL on error.
+// If an error occurs, it will be written to the int pointed to by error_code.
+shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) {
+ int shm_fd;
+ shm_struct_t *shm;
+ size_t shm_size;
+ uint32_t num_bitmaps;
+
+ if (error_code == NULL) {
+ return NULL;
+ }
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ *error_code = EINVAL;
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ if (num_locks % BITMAP_SIZE != 0) {
+ // Number of locks not a multiple of BITMAP_SIZE
+ *error_code = EINVAL;
+ return NULL;
+ }
+ num_bitmaps = num_locks / BITMAP_SIZE;
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ shm_fd = shm_open(SHM_NAME, O_RDWR, 0600);
+ if (shm_fd < 0) {
+ *error_code = errno;
+ return NULL;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (shm == MAP_FAILED) {
+ *error_code = errno;
+ }
+
+ // Ignore errors, it's ok if we leak a single FD since this only runs once
+ close(shm_fd);
+
+ // Check if we successfully mmap'd
+ if (shm == MAP_FAILED) {
+ return NULL;
+ }
+
+ // Need to check the SHM to see if it's actually our locks
+ // (errno is not set by a failed comparison, so report EINVAL directly)
+ if (shm->magic != MAGIC) {
+ *error_code = EINVAL;
+ goto CLEANUP;
+ }
+ if (shm->num_locks != num_locks) {
+ *error_code = EINVAL;
+ goto CLEANUP;
+ }
+
+ return shm;
+
+ CLEANUP:
+ munmap(shm, shm_size);
+ return NULL;
+}
+
+// Close an open SHM lock struct, unmapping the backing memory.
+// The given shm_struct_t will be rendered unusable as a result.
+// On success, 0 is returned. On failure, negative ERRNO values are returned.
+int32_t close_lock_shm(shm_struct_t *shm) {
+ int ret_code;
+ size_t shm_size;
+
+ // We can't unmap null...
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ shm_size = compute_shm_size(shm->num_bitmaps);
+
+ ret_code = munmap(shm, shm_size);
+
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
+
+// Allocate the first available semaphore
+// Returns a non-negative integer guaranteed to be less than UINT32_MAX on
+// success, or negative errno values on failure
+// On success, the returned integer is the number of the semaphore allocated
+int64_t allocate_semaphore(shm_struct_t *shm) {
+ int ret_code, i;
+ bitmap_t test_map;
+ int64_t sem_number, num_within_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the semaphore controlling access to our shared memory
+ do {
+ ret_code = sem_wait(&(shm->segment_lock));
+ } while(ret_code == -1 && errno == EINTR); // sem_wait returns -1 and sets errno
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ // Loop through our bitmaps to search for one that is not full
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ if (shm->locks[i].bitmap != 0xFFFFFFFF) {
+ test_map = 0x1;
+ num_within_bitmap = 0;
+ while (test_map != 0) {
+ if ((test_map & shm->locks[i].bitmap) == 0) {
+ // Compute the number of the semaphore we are allocating
+ sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
+ // OR in the bitmap
+ shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;
+ // Clear the semaphore
+ sem_post(&(shm->segment_lock));
+ // Return the semaphore we've allocated
+ return sem_number;
+ }
+ test_map = test_map << 1;
+ num_within_bitmap++;
+ }
+ // We should never fall through this loop
+ // TODO maybe an assert() here to panic if we do?
+ }
+ }
+
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ // All bitmaps are full
+ // We have no available semaphores, report allocation failure
+ return -1 * ENOSPC;
+}
+
+// Deallocate a given semaphore
+// Returns 0 on success, negative ERRNO values on failure
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ bitmap_t test_map;
+ int bitmap_index, index_in_bitmap, ret_code, i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+ test_map = 0x1;
+ for (i = 0; i < index_in_bitmap; i++) {
+ test_map = test_map << 1;
+ }
+
+ // Lock the semaphore controlling access to our shared memory
+ do {
+ ret_code = sem_wait(&(shm->segment_lock));
+ } while(ret_code == -1 && errno == EINTR); // retry if interrupted by a signal
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ // Check if the semaphore is allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ return -1 * ENOENT;
+ }
+
+ // The semaphore is allocated, clear it
+ // Invert the bitmask we used to test to clear the bit
+ test_map = ~test_map;
+ shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;
+
+ // Post to the semaphore to clear the lock
+ sem_post(&(shm->segment_lock));
+
+ return 0;
+}
+
+// Lock a given semaphore
+// Does not check if the semaphore is allocated - this ensures that, even for
+// removed containers, we can still successfully lock to check status (and
+// subsequently realize they have been removed).
+// Returns 0 on success, negative errno values on failure
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // Wait on the requested semaphore, retrying if interrupted by a signal
+ do {
+ ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+ } while(ret_code == -1 && errno == EINTR);
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
+
+// Unlock a given semaphore
+// Does not check if the semaphore is allocated - this ensures that, even for
+// removed containers, we can still successfully unlock any locks we hold on
+// them (and subsequently realize they have been removed).
+// Returns 0 on success, negative errno values on failure
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+ unsigned int sem_value = 0;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // Only allow a post if the semaphore is less than 1 (locked)
+ // This allows us to preserve mutex behavior
+ ret_code = sem_getvalue(&(shm->locks[bitmap_index].locks[index_in_bitmap]), &sem_value);
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+ if (sem_value >= 1) {
+ return -1 * EBUSY;
+ }
+
+ ret_code = sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
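The allocation scan above walks each 32-bit bitmap looking for the first clear bit. Here is a hedged Go sketch of the same first-fit logic, stripped of the segment_lock semaphore that guards it in the C code; it is an illustration, not code from the commit:

    package main

    import "fmt"

    const bitmapSize = 32 // mirrors BITMAP_SIZE for a uint32 bitmap

    // firstFree returns the index of the first clear bit across a slice of
    // 32-bit bitmaps and marks it used, or -1 if every bit is set.
    func firstFree(bitmaps []uint32) int {
        for i := range bitmaps {
            if bitmaps[i] == 0xFFFFFFFF {
                continue // this bitmap is full, try the next
            }
            for bit := 0; bit < bitmapSize; bit++ {
                mask := uint32(1) << uint(bit)
                if bitmaps[i]&mask == 0 {
                    bitmaps[i] |= mask // OR the bit in, as the C code does
                    return i*bitmapSize + bit
                }
            }
        }
        return -1 // all bitmaps full: the C code returns -ENOSPC here
    }

    func main() {
        maps := []uint32{0xFFFFFFFF, 0x7} // first map full; second has bits 0-2 set
        fmt.Println(firstFree(maps))      // prints 35 (bitmap 1, bit 3)
    }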
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
new file mode 100644
index 000000000..ff9b0ce2c
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.go
@@ -0,0 +1,188 @@
+package shm
+
+// #cgo LDFLAGS: -lrt -lpthread
+// #include "shm_lock.h"
+// const uint32_t bitmap_size_c = BITMAP_SIZE;
+import "C"
+
+import (
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+var (
+ bitmapSize uint32 = uint32(C.bitmap_size_c)
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment
+type SHMLocks struct {
+ lockStruct *C.shm_struct_t
+ valid bool
+ maxLocks uint32
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must be a multiple of the lock bitmap size (by default, 32).
+func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
+ if numLocks%bitmapSize != 0 || numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a nonzero multiple of %d", C.bitmap_size_c)
+ }
+
+ locks := new(SHMLocks)
+
+ var errCode C.int
+ lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored.
+ // The C code stores a positive errno in errCode, so no negation here.
+ return nil, syscall.Errno(errCode)
+ }
+
+ locks.lockStruct = lockStruct
+ locks.maxLocks = numLocks
+ locks.valid = true
+
+ return locks, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with and be a multiple of the lock bitmap size (default
+// 32).
+func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
+ if numLocks%bitmapSize != 0 || numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a nonzero multiple of %d", C.bitmap_size_c)
+ }
+
+ locks := new(SHMLocks)
+
+ var errCode C.int
+ lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored.
+ // The C code stores a positive errno in errCode, so no negation here.
+ return nil, syscall.Errno(errCode)
+ }
+
+ locks.lockStruct = lockStruct
+ locks.maxLocks = numLocks
+ locks.valid = true
+
+ return locks, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ return locks.maxLocks
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ locks.valid = false
+
+ retCode := C.close_lock_shm(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.allocate_semaphore(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return 0, syscall.Errno(-1 * retCode)
+ }
+
+ return uint32(retCode), nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum lock count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum lock count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to unlock a container's lock after it has been
+// deleted, but before the caller has queried the database to determine this,
+// will succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum lock count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
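Taken together, the API above is used roughly as follows. This is a hedged sketch: the import path is assumed (the package lived under github.com/containers/libpod at the time), and error handling is shortened to panics for brevity:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/lock/shm" // import path assumed
    )

    func main() {
        // Create the segment; the count must be a nonzero multiple of the
        // bitmap size (32 here).
        locks, err := shm.CreateSHMLock(32)
        if err != nil {
            panic(err)
        }
        defer locks.Close()

        // Reserve a semaphore, take it, do work, release it, and free it.
        sem, err := locks.AllocateSemaphore()
        if err != nil {
            panic(err)
        }
        if err := locks.LockSemaphore(sem); err != nil {
            panic(err)
        }
        fmt.Printf("holding semaphore %d\n", sem)
        if err := locks.UnlockSemaphore(sem); err != nil {
            panic(err)
        }
        if err := locks.DeallocateSemaphore(sem); err != nil {
            panic(err)
        }
    }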
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
new file mode 100644
index 000000000..18bea47e9
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.h
@@ -0,0 +1,43 @@
+#ifndef shm_locks_h_
+#define shm_locks_h_
+
+#include <semaphore.h>
+#include <stdint.h>
+
+// Magic number to ensure we open the right SHM segment
+#define MAGIC 0xA5A5
+
+// Name of the SHM
+#define SHM_NAME "/libpod_lock"
+
+// Type for our bitmaps
+typedef uint32_t bitmap_t;
+
+// bitmap size
+#define BITMAP_SIZE (sizeof(bitmap_t) * 8)
+
+// Struct to hold a single bitmap and associated locks
+typedef struct lock_group {
+ bitmap_t bitmap;
+ sem_t locks[BITMAP_SIZE];
+} lock_group_t;
+
+// Struct to hold our SHM locks
+typedef struct shm_struct {
+ uint16_t magic;
+ sem_t segment_lock;
+ uint32_t num_bitmaps;
+ uint32_t num_locks;
+ lock_group_t locks[];
+} shm_struct_t;
+
+size_t compute_shm_size(uint32_t num_bitmaps);
+shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code);
+shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code);
+int32_t close_lock_shm(shm_struct_t *shm);
+int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+
+#endif
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
new file mode 100644
index 000000000..bc22db835
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -0,0 +1,243 @@
+package shm
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// All tests here are in the same process, which somewhat limits their utility
+// The big intent of this package is multiprocess locking, which is really hard
+// to test without actually having multiple processes...
+// We can at least verify that the locks work within the local process.
+
+// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
+const numLocks = 128
+
+// We need a test main to ensure that the SHM is created before the tests run
+func TestMain(m *testing.M) {
+ shmLock, err := CreateSHMLock(numLocks)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
+ os.Exit(-1)
+ }
+
+ // Close the SHM - every subsequent test will reopen
+ if err := shmLock.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
+ os.Exit(-1)
+ }
+
+ exitCode := m.Run()
+
+ // We need to remove the SHM segment to clean up after ourselves
+ os.RemoveAll("/dev/shm/libpod_lock")
+
+ os.Exit(exitCode)
+}
+
+func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
+ locks, err := OpenSHMLock(numLocks)
+ if err != nil {
+ t.Fatalf("Error opening locks: %v", err)
+ }
+ defer func() {
+ // Unlock and deallocate all locks
+ // Ignore EBUSY (lock is already unlocked)
+ // Ignore ENOENT (lock is not allocated)
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY {
+ t.Fatalf("Error unlocking semaphore %d: %v", i, err)
+ }
+ if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
+ t.Fatalf("Error deallocating semaphore %d: %v", i, err)
+ }
+ }
+
+ if err := locks.Close(); err != nil {
+ t.Fatalf("Error closing locks: %v", err)
+ }
+ }()
+
+ success := t.Run("locks", func(t *testing.T) {
+ testFunc(t, locks)
+ })
+ if !success {
+ t.Fail()
+ }
+}
+
+// Test that creating an SHM with a bad size fails
+func TestCreateNewSHMBadSize(t *testing.T) {
+ // Odd number, not a power of 2, should never be a word size on a system
+ _, err := CreateSHMLock(7)
+ assert.Error(t, err)
+}
+
+// Test that creating an SHM with 0 size fails
+func TestCreateNewSHMZeroSize(t *testing.T) {
+ _, err := CreateSHMLock(0)
+ assert.Error(t, err)
+}
+
+// Test that deallocating an unallocated lock errors
+func TestDeallocateUnallocatedLockErrors(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.DeallocateSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test that unlocking an unlocked lock fails
+func TestUnlockingUnlockedLockFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.UnlockSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test that locking and double-unlocking fails
+func TestDoubleUnlockFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(0)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test allocating - lock - unlock - deallocate cycle, single lock
+func TestLockLifecycleSingleLock(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sem, err := locks.AllocateSemaphore()
+ require.NoError(t, err)
+
+ err = locks.LockSemaphore(sem)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(sem)
+ assert.NoError(t, err)
+
+ err = locks.DeallocateSemaphore(sem)
+ assert.NoError(t, err)
+ })
+}
+
+// Test allocate two locks returns different locks
+func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sem1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ sem2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.NotEqual(t, sem1, sem2)
+ })
+}
+
+// Test allocate all locks successful and all are unique
+func TestAllocateAllLocksSucceeds(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sems := make(map[uint32]bool)
+ for i := 0; i < numLocks; i++ {
+ sem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Ensure the allocated semaphore is unique
+ _, ok := sems[sem]
+ assert.False(t, ok)
+
+ sems[sem] = true
+ }
+ })
+}
+
+// Test allocating more than the given max fails
+func TestAllocateTooManyLocksFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ for i := 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Try and allocate one more
+ _, err := locks.AllocateSemaphore()
+ assert.Error(t, err)
+ })
+}
+
+// Test allocating max locks, deallocating one, and then allocating again succeeds
+func TestAllocateDeallocateCycle(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ for i := 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Now loop through again, deallocating and reallocating.
+ // Each time we free 1 semaphore, allocate again, and make sure
+ // we get the same semaphore back.
+ var j uint32
+ for j = 0; j < numLocks; j++ {
+ err := locks.DeallocateSemaphore(j)
+ assert.NoError(t, err)
+
+ newSem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ assert.Equal(t, j, newSem)
+ }
+ })
+}
+
+// Test that locks actually lock
+func TestLockSemaphoreActuallyLocks(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // This entire test is very ugly - lots of sleeps to try and get
+ // things to occur in the right order.
+ // It also doesn't even exercise the multiprocess nature of the
+ // locks.
+
+ // Get the current time
+ startTime := time.Now()
+
+ // Start a goroutine to take the lock and then release it after
+ // a second.
+ go func() {
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ time.Sleep(1 * time.Second)
+
+ err = locks.UnlockSemaphore(0)
+ assert.NoError(t, err)
+ }()
+
+ // Sleep for a quarter of a second to give the goroutine time
+ // to kick off and grab the lock
+ time.Sleep(250 * time.Millisecond)
+
+ // Take the lock
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ // Get the current time
+ endTime := time.Now()
+
+ // Verify that at least 1 second has passed since start
+ duration := endTime.Sub(startTime)
+ assert.True(t, duration.Seconds() > 1.0)
+ })
+}
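The multiprocess case the test comments allude to would look roughly like this in a second process. A hedged sketch using only the API from this diff, with the same assumed import path as above:

    package main

    import (
        "log"

        "github.com/containers/libpod/libpod/lock/shm" // import path assumed
    )

    func main() {
        // Attach to a segment some other process created with CreateSHMLock.
        // The count must match the creator's (128 in the tests above).
        locks, err := shm.OpenSHMLock(128)
        if err != nil {
            log.Fatal(err) // e.g. segment missing, or lock counts disagree
        }

        // Blocks until the process holding semaphore 0 posts it - the
        // cross-process exclusion the in-process tests cannot exercise.
        if err := locks.LockSemaphore(0); err != nil {
            log.Fatal(err)
        }
        defer locks.UnlockSemaphore(0)

        // ... work done under the cross-process lock ...
    }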