From b489feff717a9976ee177acd4b239acf2dc9c326 Mon Sep 17 00:00:00 2001
From: Matthew Heon <mheon@redhat.com>
Date: Mon, 6 Aug 2018 10:57:43 -0400
Subject: Add mutex invariant to SHM semaphores.

Check value of semaphores when incrementing to ensure we never go
beyond 1, preserving mutex invariants.

Also, add cleanup code to the lock tests, ensuring that we never leave
the locks in a bad state after a test. We aren't destroying and
recreating the SHM every time, so we have to be careful not to leak
state between test runs.

Signed-off-by: Matthew Heon <mheon@redhat.com>
---
 libpod/lock/locks_test.go | 27 +++++++++++++++++++++++++++
 libpod/lock/shm_lock.c    | 18 ++++++++++++++++--
 2 files changed, 43 insertions(+), 2 deletions(-)

(limited to 'libpod/lock')

diff --git a/libpod/lock/locks_test.go b/libpod/lock/locks_test.go
index b14d6e19a..ce4882643 100644
--- a/libpod/lock/locks_test.go
+++ b/libpod/lock/locks_test.go
@@ -3,6 +3,7 @@ package lock
 import (
 	"fmt"
 	"os"
+	"syscall"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -46,6 +47,19 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
 		t.Fatalf("Error opening locks: %v", err)
 	}
 	defer func() {
+		// Unlock and deallocate all locks
+		// Ignore EBUSY (lock is already unlocked)
+		// Ignore ENOENT (lock is not allocated)
+		var i uint32
+		for i = 0; i < numLocks; i++ {
+			if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY {
+				t.Fatalf("Error unlocking semaphore %d: %v", i, err)
+			}
+			if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
+				t.Fatalf("Error deallocating semaphore %d: %v", i, err)
+			}
+		}
+
 		if err := locks.Close(); err != nil {
 			t.Fatalf("Error closing locks: %v", err)
 		}
@@ -82,3 +96,16 @@ func TestLockLifecycleSingleLock(t *testing.T) {
 		assert.NoError(t, err)
 	})
 }
+
+// Test allocate two locks returns different locks
+func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		sem1, err := locks.AllocateSemaphore()
+		assert.NoError(t, err)
+
+		sem2, err := locks.AllocateSemaphore()
+		assert.NoError(t, err)
+
+		assert.NotEqual(t, sem1, sem2)
+	})
+}
diff --git a/libpod/lock/shm_lock.c b/libpod/lock/shm_lock.c
index ab715891c..48fd4d4a9 100644
--- a/libpod/lock/shm_lock.c
+++ b/libpod/lock/shm_lock.c
@@ -321,7 +321,8 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
 // subsequently realize they have been removed).
 // Returns 0 on success, -1 on failure
 int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
-  int bitmap_index, index_in_bitmap;
+  int bitmap_index, index_in_bitmap, ret_code;
+  unsigned int sem_value = 0;
   if (shm == NULL) {
     return -1 * EINVAL;
   }
@@ -334,7 +335,20 @@ int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   bitmap_index = sem_index / BITMAP_SIZE;
   index_in_bitmap = sem_index % BITMAP_SIZE;
 
-  sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+  // Only allow a post if the semaphore is less than 1 (locked)
+  // This allows us to preserve mutex behavior
+  ret_code = sem_getvalue(&(shm->locks[bitmap_index].locks[index_in_bitmap]), &sem_value);
+  if (ret_code != 0) {
+    return -1 * errno;
+  }
+  if (sem_value >= 1) {
+    return -1 * EBUSY;
+  }
+
+  ret_code = sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+  if (ret_code != 0) {
+    return -1 * errno;
+  }
 
   return 0;
 }
-- 
cgit v1.2.3-54-g00ecf