From 185136cf0edee5288576b541517e0e994f6ee18d Mon Sep 17 00:00:00 2001
From: Matthew Heon
Date: Wed, 8 Aug 2018 11:22:44 -0400
Subject: Add interface for libpod multiprocess locks

Signed-off-by: Matthew Heon
---
 libpod/lock/lock.go          |  55 ++++++++++
 libpod/lock/locks.go         | 183 --------------------------------
 libpod/lock/locks_test.go    | 244 -------------------------------------------
 libpod/lock/shm_lock.go      | 183 ++++++++++++++++++++++++++++++++
 libpod/lock/shm_lock_test.go | 244 +++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 482 insertions(+), 427 deletions(-)
 create mode 100644 libpod/lock/lock.go
 delete mode 100644 libpod/lock/locks.go
 delete mode 100644 libpod/lock/locks_test.go
 create mode 100644 libpod/lock/shm_lock.go
 create mode 100644 libpod/lock/shm_lock_test.go

diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
new file mode 100644
index 000000000..6999e1118
--- /dev/null
+++ b/libpod/lock/lock.go
@@ -0,0 +1,55 @@
+package lock
+
+// LockManager provides an interface for allocating multiprocess locks.
+// Locks returned by LockManager MUST be multiprocess - allocating a lock in
+// process A and retrieving that lock by its ID in process B must return
+// handles for the same lock, and locking the lock in A must exclude B from
+// the lock until it is unlocked in A.
+// All locks must be identified by a UUID (retrieved with Locker's ID() method).
+// All locks with a given UUID must refer to the same underlying lock, and it
+// must be possible to retrieve the lock given its UUID.
+// Each UUID should refer to a unique underlying lock.
+// Calls to AllocateLock() must return a lock with a unique, unallocated UUID.
+// AllocateLock() must fail once all available locks have been allocated.
+// Locks are returned to the pool by calls to Free(), and can subsequently be
+// reallocated.
+type LockManager interface {
+	// AllocateLock returns an unallocated lock.
+	// It is guaranteed that the same lock will not be returned again by
+	// AllocateLock until the returned lock has Free() called on it.
+	// If all available locks are allocated, AllocateLock will return an
+	// error.
+	AllocateLock() (Locker, error)
+	// RetrieveLock retrieves a lock given its UUID.
+	// The underlying lock MUST be the same as any other lock with the
+	// same UUID.
+	RetrieveLock(id string) (Locker, error)
+}
+
+// Locker is similar to sync.Locker, but provides a method for freeing the lock
+// to allow its reuse.
+// All Locker implementations must maintain mutex semantics - the lock only
+// allows one caller in the critical section at a time.
+// All locks with the same ID must refer to the same underlying lock, even
+// if they are within multiple processes.
+type Locker interface {
+	// ID retrieves the lock's ID.
+	// ID is guaranteed to uniquely identify the lock within the
+	// LockManager - that is, calling RetrieveLock with this ID will return
+	// another instance of the same lock.
+	ID() string
+	// Lock locks the lock.
+	// This call MUST block until it successfully acquires the lock or
+	// encounters a fatal error.
+	Lock() error
+	// Unlock unlocks the lock.
+	// A call to Unlock() on a lock that is already unlocked MUST
+	// error.
+	Unlock() error
+	// Free deallocates the underlying lock, allowing its reuse by
+	// other pods and containers.
+	// The lock MUST still be usable after a Free() - some libpod instances
+	// may still retain Container structs with the old lock. This simply
+	// advises the manager that the lock may be reallocated.
+ Free() error +} diff --git a/libpod/lock/locks.go b/libpod/lock/locks.go deleted file mode 100644 index 4d7c26aa2..000000000 --- a/libpod/lock/locks.go +++ /dev/null @@ -1,183 +0,0 @@ -package lock - -// #cgo LDFLAGS: -lrt -lpthread -// #include "shm_lock.h" -// const uint32_t bitmap_size_c = BITMAP_SIZE; -import "C" - -import ( - "syscall" - - "github.com/pkg/errors" -) - -var ( - bitmapSize uint32 = uint32(C.bitmap_size_c) -) - -// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory -// segment -type SHMLocks struct { - lockStruct *C.shm_struct_t - valid bool - maxLocks uint32 -} - -// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX -// semaphores, and returns a struct that can be used to operate on those locks. -// numLocks must be a multiple of the lock bitmap size (by default, 32). -func CreateSHMLock(numLocks uint32) (*SHMLocks, error) { - if numLocks % bitmapSize != 0 || numLocks == 0 { - return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) - } - - locks := new(SHMLocks) - - var errCode C.int = 0 - lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode) - if lockStruct == nil { - // We got a null pointer, so something errored - return nil, syscall.Errno(-1 * errCode) - } - - locks.lockStruct = lockStruct - locks.maxLocks = numLocks - locks.valid = true - - return locks, nil -} - -// OpenSHMLock opens an existing shared-memory segment holding a given number of -// POSIX semaphores. numLocks must match the number of locks the shared memory -// segment was created with and be a multiple of the lock bitmap size (default -// 32). -func OpenSHMLock(numLocks uint32) (*SHMLocks, error) { - if numLocks % bitmapSize != 0 || numLocks == 0 { - return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) - } - - locks := new(SHMLocks) - - var errCode C.int = 0 - lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode) - if lockStruct == nil { - // We got a null pointer, so something errored - return nil, syscall.Errno(-1 * errCode) - } - - locks.lockStruct = lockStruct - locks.maxLocks = numLocks - locks.valid = true - - return locks, nil -} - -// Close closes an existing shared-memory segment. -// The segment will be rendered unusable after closing. -// WARNING: If you Close() while there are still locks locked, these locks may -// fail to release, causing a program freeze. -// Close() is only intended to be used while testing the locks. -func (locks *SHMLocks) Close() error { - if !locks.valid { - return errors.Wrapf(syscall.EINVAL, "locks have already been closed") - } - - locks.valid = false - - retCode := C.close_lock_shm(locks.lockStruct) - if retCode < 0 { - // Negative errno returned - return syscall.Errno(-1 * retCode) - } - - return nil -} - -// AllocateSemaphore allocates a semaphore from a shared-memory segment for use -// by a container or pod. -// Returns the index of the semaphore that was allocated. -// Allocations past the maximum number of locks given when the SHM segment was -// created will result in an error, and no semaphore will be allocated. 
-func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { - if !locks.valid { - return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed") - } - - retCode := C.allocate_semaphore(locks.lockStruct) - if retCode < 0 { - // Negative errno returned - return 0, syscall.Errno(-1 * retCode) - } - - return uint32(retCode), nil -} - -// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be -// reallocated to another container or pod. -// The given semaphore must be already allocated, or an error will be returned. -func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error { - if !locks.valid { - return errors.Wrapf(syscall.EINVAL, "locks have already been closed") - } - - if sem > locks.maxLocks { - return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) - } - - retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem)) - if retCode < 0 { - // Negative errno returned - return syscall.Errno(-1 * retCode) - } - - return nil -} - -// LockSemaphore locks the given semaphore. -// If the semaphore is already locked, LockSemaphore will block until the lock -// can be acquired. -// There is no requirement that the given semaphore be allocated. -// This ensures that attempts to lock a container after it has been deleted, -// but before the caller has queried the database to determine this, will -// succeed. -func (locks *SHMLocks) LockSemaphore(sem uint32) error { - if !locks.valid { - return errors.Wrapf(syscall.EINVAL, "locks have already been closed") - } - - if sem > locks.maxLocks { - return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) - } - - retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem)) - if retCode < 0 { - // Negative errno returned - return syscall.Errno(-1 * retCode) - } - - return nil -} - -// UnlockSemaphore unlocks the given semaphore. -// Unlocking a semaphore that is already unlocked with return EBUSY. -// There is no requirement that the given semaphore be allocated. -// This ensures that attempts to lock a container after it has been deleted, -// but before the caller has queried the database to determine this, will -// succeed. -func (locks *SHMLocks) UnlockSemaphore(sem uint32) error { - if !locks.valid { - return errors.Wrapf(syscall.EINVAL, "locks have already been closed") - } - - if sem > locks.maxLocks { - return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) - } - - retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem)) - if retCode < 0 { - // Negative errno returned - return syscall.Errno(-1 * retCode) - } - - return nil -} diff --git a/libpod/lock/locks_test.go b/libpod/lock/locks_test.go deleted file mode 100644 index 6d4525f6a..000000000 --- a/libpod/lock/locks_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package lock - -import ( - "fmt" - "os" - "syscall" - "time" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// All tests here are in the same process, which somewhat limits their utility -// The big intent of this package it multiprocess locking, which is really hard -// to test without actually having multiple processes... -// We can at least verify that the locks work within the local process. 
- -// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps -const numLocks = 128 - -// We need a test main to ensure that the SHM is created before the tests run -func TestMain(m *testing.M) { - shmLock, err := CreateSHMLock(numLocks) - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err) - os.Exit(-1) - } - - // Close the SHM - every subsequent test will reopen - if err := shmLock.Close(); err != nil { - fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err) - os.Exit(-1) - } - - exitCode := m.Run() - - // We need to remove the SHM segment to clean up after ourselves - os.RemoveAll("/dev/shm/libpod_lock") - - os.Exit(exitCode) -} - - -func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) { - locks, err := OpenSHMLock(numLocks) - if err != nil { - t.Fatalf("Error opening locks: %v", err) - } - defer func() { - // Unlock and deallocate all locks - // Ignore EBUSY (lock is already unlocked) - // Ignore ENOENT (lock is not allocated) - var i uint32 - for i = 0; i < numLocks; i++ { - if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY { - t.Fatalf("Error unlocking semaphore %d: %v", i, err) - } - if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT { - t.Fatalf("Error deallocating semaphore %d: %v", i, err) - } - } - - if err := locks.Close(); err != nil { - t.Fatalf("Error closing locks: %v", err) - } - }() - - success := t.Run("locks", func (t *testing.T) { - testFunc(t, locks) - }) - if !success { - t.Fail() - } -} - -// Test that creating an SHM with a bad size fails -func TestCreateNewSHMBadSize(t *testing.T) { - // Odd number, not a power of 2, should never be a word size on a system - _, err := CreateSHMLock(7) - assert.Error(t, err) -} - -// Test that creating an SHM with 0 size fails -func TestCreateNewSHMZeroSize(t *testing.T) { - _, err := CreateSHMLock(0) - assert.Error(t, err) -} - -// Test that deallocating an unallocated lock errors -func TestDeallocateUnallocatedLockErrors(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - err := locks.DeallocateSemaphore(0) - assert.Error(t, err) - }) -} - -// Test that unlocking an unlocked lock fails -func TestUnlockingUnlockedLockFails(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - err := locks.UnlockSemaphore(0) - assert.Error(t, err) - }) -} - -// Test that locking and double-unlocking fails -func TestDoubleUnlockFails(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - err := locks.LockSemaphore(0) - assert.NoError(t, err) - - err = locks.UnlockSemaphore(0) - assert.NoError(t, err) - - err = locks.UnlockSemaphore(0) - assert.Error(t, err) - }) -} - -// Test allocating - lock - unlock - deallocate cycle, single lock -func TestLockLifecycleSingleLock(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - sem, err := locks.AllocateSemaphore() - require.NoError(t, err) - - err = locks.LockSemaphore(sem) - assert.NoError(t, err) - - err = locks.UnlockSemaphore(sem) - assert.NoError(t, err) - - err = locks.DeallocateSemaphore(sem) - assert.NoError(t, err) - }) -} - -// Test allocate two locks returns different locks -func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - sem1, err := locks.AllocateSemaphore() - assert.NoError(t, err) - - sem2, err := locks.AllocateSemaphore() - assert.NoError(t, err) - - assert.NotEqual(t, sem1, sem2) - }) -} - -// Test allocate all locks successful and all are unique 
-func TestAllocateAllLocksSucceeds(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - sems := make(map[uint32]bool) - for i := 0; i < numLocks; i++ { - sem, err := locks.AllocateSemaphore() - assert.NoError(t, err) - - // Ensure the allocate semaphore is unique - _, ok := sems[sem] - assert.False(t, ok) - - sems[sem] = true - } - }) -} - -// Test allocating more than the given max fails -func TestAllocateTooManyLocksFails(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - // Allocate all locks - for i := 0; i < numLocks; i++ { - _, err := locks.AllocateSemaphore() - assert.NoError(t, err) - } - - // Try and allocate one more - _, err := locks.AllocateSemaphore() - assert.Error(t, err) - }) -} - -// Test allocating max locks, deallocating one, and then allocating again succeeds -func TestAllocateDeallocateCycle(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - // Allocate all locks - for i := 0; i < numLocks; i++ { - _, err := locks.AllocateSemaphore() - assert.NoError(t, err) - } - - // Now loop through again, deallocating and reallocating. - // Each time we free 1 semaphore, allocate again, and make sure - // we get the same semaphore back. - var j uint32 - for j = 0; j < numLocks; j++ { - err := locks.DeallocateSemaphore(j) - assert.NoError(t, err) - - newSem, err := locks.AllocateSemaphore() - assert.NoError(t, err) - assert.Equal(t, j, newSem) - } - }) -} - -// Test that locks actually lock -func TestLockSemaphoreActuallyLocks(t *testing.T) { - runLockTest(t, func(t *testing.T, locks *SHMLocks) { - // This entire test is very ugly - lots of sleeps to try and get - // things to occur in the right order. - // It also doesn't even exercise the multiprocess nature of the - // locks. - - // Get the current time - startTime := time.Now() - - // Start a goroutine to take the lock and then release it after - // a second. - go func() { - err := locks.LockSemaphore(0) - assert.NoError(t, err) - - time.Sleep(1 * time.Second) - - err = locks.UnlockSemaphore(0) - assert.NoError(t, err) - }() - - // Sleep for a quarter of a second to give the goroutine time - // to kick off and grab the lock - time.Sleep(250 * time.Millisecond) - - // Take the lock - err := locks.LockSemaphore(0) - assert.NoError(t, err) - - // Get the current time - endTime := time.Now() - - // Verify that at least 1 second has passed since start - duration := endTime.Sub(startTime) - assert.True(t, duration.Seconds() > 1.0) - }) -} diff --git a/libpod/lock/shm_lock.go b/libpod/lock/shm_lock.go new file mode 100644 index 000000000..4d7c26aa2 --- /dev/null +++ b/libpod/lock/shm_lock.go @@ -0,0 +1,183 @@ +package lock + +// #cgo LDFLAGS: -lrt -lpthread +// #include "shm_lock.h" +// const uint32_t bitmap_size_c = BITMAP_SIZE; +import "C" + +import ( + "syscall" + + "github.com/pkg/errors" +) + +var ( + bitmapSize uint32 = uint32(C.bitmap_size_c) +) + +// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory +// segment +type SHMLocks struct { + lockStruct *C.shm_struct_t + valid bool + maxLocks uint32 +} + +// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX +// semaphores, and returns a struct that can be used to operate on those locks. +// numLocks must be a multiple of the lock bitmap size (by default, 32). 
+func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
+	if numLocks%bitmapSize != 0 || numLocks == 0 {
+		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
+	}
+
+	locks := new(SHMLocks)
+
+	var errCode C.int = 0
+	lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode)
+	if lockStruct == nil {
+		// We got a null pointer, so something errored
+		return nil, syscall.Errno(-1 * errCode)
+	}
+
+	locks.lockStruct = lockStruct
+	locks.maxLocks = numLocks
+	locks.valid = true
+
+	return locks, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with and be a multiple of the lock bitmap size (default
+// 32).
+func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
+	if numLocks%bitmapSize != 0 || numLocks == 0 {
+		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
+	}
+
+	locks := new(SHMLocks)
+
+	var errCode C.int = 0
+	lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode)
+	if lockStruct == nil {
+		// We got a null pointer, so something errored
+		return nil, syscall.Errno(-1 * errCode)
+	}
+
+	locks.lockStruct = lockStruct
+	locks.maxLocks = numLocks
+	locks.valid = true
+
+	return locks, nil
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	locks.valid = false
+
+	retCode := C.close_lock_shm(locks.lockStruct)
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+	if !locks.valid {
+		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	retCode := C.allocate_semaphore(locks.lockStruct)
+	if retCode < 0 {
+		// Negative errno returned
+		return 0, syscall.Errno(-1 * retCode)
+	}
+
+	return uint32(retCode), nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must already be allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	if sem >= locks.maxLocks {
+		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+	}
+
+	retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	if sem >= locks.maxLocks {
+		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+	}
+
+	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to unlock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	if sem >= locks.maxLocks {
+		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+	}
+
+	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
diff --git a/libpod/lock/shm_lock_test.go b/libpod/lock/shm_lock_test.go
new file mode 100644
index 000000000..6d4525f6a
--- /dev/null
+++ b/libpod/lock/shm_lock_test.go
@@ -0,0 +1,244 @@
+package lock
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// All tests here are in the same process, which somewhat limits their utility.
+// The main intent of this package is multiprocess locking, which is really hard
+// to test without actually having multiple processes...
+// We can at least verify that the locks work within the local process.
+
+// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
+const numLocks = 128
+
+// We need a test main to ensure that the SHM is created before the tests run
+func TestMain(m *testing.M) {
+	shmLock, err := CreateSHMLock(numLocks)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
+		os.Exit(-1)
+	}
+
+	// Close the SHM - every subsequent test will reopen
+	if err := shmLock.Close(); err != nil {
+		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
+		os.Exit(-1)
+	}
+
+	exitCode := m.Run()
+
+	// We need to remove the SHM segment to clean up after ourselves
+	os.RemoveAll("/dev/shm/libpod_lock")
+
+	os.Exit(exitCode)
+}
+
+
+func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
+	locks, err := OpenSHMLock(numLocks)
+	if err != nil {
+		t.Fatalf("Error opening locks: %v", err)
+	}
+	defer func() {
+		// Unlock and deallocate all locks
+		// Ignore EBUSY (lock is already unlocked)
+		// Ignore ENOENT (lock is not allocated)
+		var i uint32
+		for i = 0; i < numLocks; i++ {
+			if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY {
+				t.Fatalf("Error unlocking semaphore %d: %v", i, err)
+			}
+			if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
+				t.Fatalf("Error deallocating semaphore %d: %v", i, err)
+			}
+		}
+
+		if err := locks.Close(); err != nil {
+			t.Fatalf("Error closing locks: %v", err)
+		}
+	}()
+
+	success := t.Run("locks", func(t *testing.T) {
+		testFunc(t, locks)
+	})
+	if !success {
+		t.Fail()
+	}
+}
+
+// Test that creating an SHM with a bad size fails
+func TestCreateNewSHMBadSize(t *testing.T) {
+	// Odd number, not a power of 2, should never be a word size on a system
+	_, err := CreateSHMLock(7)
+	assert.Error(t, err)
+}
+
+// Test that creating an SHM with 0 size fails
+func TestCreateNewSHMZeroSize(t *testing.T) {
+	_, err := CreateSHMLock(0)
+	assert.Error(t, err)
+}
+
+// Test that deallocating an unallocated lock errors
+func TestDeallocateUnallocatedLockErrors(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		err := locks.DeallocateSemaphore(0)
+		assert.Error(t, err)
+	})
+}
+
+// Test that unlocking an unlocked lock fails
+func TestUnlockingUnlockedLockFails(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		err := locks.UnlockSemaphore(0)
+		assert.Error(t, err)
+	})
+}
+
+// Test that locking and double-unlocking fails
+func TestDoubleUnlockFails(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		err := locks.LockSemaphore(0)
+		assert.NoError(t, err)
+
+		err = locks.UnlockSemaphore(0)
+		assert.NoError(t, err)
+
+		err = locks.UnlockSemaphore(0)
+		assert.Error(t, err)
+	})
+}
+
+// Test the allocate - lock - unlock - deallocate cycle with a single lock
+func TestLockLifecycleSingleLock(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		sem, err := locks.AllocateSemaphore()
+		require.NoError(t, err)
+
+		err = locks.LockSemaphore(sem)
+		assert.NoError(t, err)
+
+		err = locks.UnlockSemaphore(sem)
+		assert.NoError(t, err)
+
+		err = locks.DeallocateSemaphore(sem)
+		assert.NoError(t, err)
+	})
+}
+
+// Test that allocating two locks returns different locks
+func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		sem1, err := locks.AllocateSemaphore()
+		assert.NoError(t, err)
+
+		sem2, err := locks.AllocateSemaphore()
+		assert.NoError(t, err)
+
+		assert.NotEqual(t, sem1, sem2)
+	})
+}
+
+// Test that allocating all locks succeeds and each lock is unique
+func TestAllocateAllLocksSucceeds(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		sems := make(map[uint32]bool)
+		for i := 0; i < numLocks; i++ {
+			sem, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+
+			// Ensure the allocated semaphore is unique
+			_, ok := sems[sem]
+			assert.False(t, ok)
+
+			sems[sem] = true
+		}
+	})
+}
+
+// Test allocating more than the given max fails
+func TestAllocateTooManyLocksFails(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// Allocate all locks
+		for i := 0; i < numLocks; i++ {
+			_, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+		}
+
+		// Try to allocate one more
+		_, err := locks.AllocateSemaphore()
+		assert.Error(t, err)
+	})
+}
+
+// Test allocating max locks, deallocating one, and then allocating again succeeds
+func TestAllocateDeallocateCycle(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// Allocate all locks
+		for i := 0; i < numLocks; i++ {
+			_, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+		}
+
+		// Now loop through again, deallocating and reallocating.
+		// Each time we free 1 semaphore, allocate again, and make sure
+		// we get the same semaphore back.
+		var j uint32
+		for j = 0; j < numLocks; j++ {
+			err := locks.DeallocateSemaphore(j)
+			assert.NoError(t, err)
+
+			newSem, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+			assert.Equal(t, j, newSem)
+		}
+	})
+}
+
+// Test that locks actually lock
+func TestLockSemaphoreActuallyLocks(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// This entire test is very ugly - lots of sleeps to try to get
+		// things to occur in the right order.
+		// It also doesn't even exercise the multiprocess nature of the
+		// locks.
+
+		// Get the current time
+		startTime := time.Now()
+
+		// Start a goroutine to take the lock and then release it after
+		// a second.
+		go func() {
+			err := locks.LockSemaphore(0)
+			assert.NoError(t, err)
+
+			time.Sleep(1 * time.Second)
+
+			err = locks.UnlockSemaphore(0)
+			assert.NoError(t, err)
+		}()
+
+		// Sleep for a quarter of a second to give the goroutine time
+		// to kick off and grab the lock
+		time.Sleep(250 * time.Millisecond)
+
+		// Take the lock
+		err := locks.LockSemaphore(0)
+		assert.NoError(t, err)
+
+		// Get the current time
+		endTime := time.Now()
+
+		// Verify that at least 1 second has passed since start
+		duration := endTime.Sub(startTime)
+		assert.True(t, duration.Seconds() > 1.0)
+	})
+}
--
cgit v1.2.3-54-g00ecf