author    Matthew Heon <matthew.heon@gmail.com>  2018-08-07 16:16:01 -0400
committer Matthew Heon <matthew.heon@pm.me>      2019-01-04 09:45:59 -0500
commit    35cc71a9e8d542477be2c35e767ec6cfeda63ac1 (patch)
tree      bd4c073fd22274cde773984532cd3a74d3ab9001 /libpod
parent    52d95f50729b40628ae77d667d262c5235e50cb8 (diff)
Improve documentation and unit tests for SHM locks
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'libpod')
-rw-r--r--  libpod/lock/locks.go       26
-rw-r--r--  libpod/lock/locks_test.go  97
2 files changed, 117 insertions(+), 6 deletions(-)
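
For context on the API whose comments this commit improves, the sketch below (not part of this commit) walks a semaphore through its full lifecycle. The import path is the one libpod used at this point in history and is an assumption, as is the lock count of 32 (any nonzero multiple of the bitmap size, default 32, would do).

// Lifecycle sketch for the SHM lock API documented below.
package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock" // assumed import path
)

func main() {
	// Create a new SHM segment holding 32 POSIX semaphores.
	locks, err := lock.CreateSHMLock(32)
	if err != nil {
		panic(err)
	}
	defer locks.Close()

	// Allocate a semaphore for a container or pod.
	sem, err := locks.AllocateSemaphore()
	if err != nil {
		panic(err)
	}

	// Guard a critical section with it.
	if err := locks.LockSemaphore(sem); err != nil {
		panic(err)
	}
	fmt.Printf("holding semaphore %d\n", sem)
	if err := locks.UnlockSemaphore(sem); err != nil {
		panic(err)
	}

	// Free it so another container or pod can reuse the slot.
	if err := locks.DeallocateSemaphore(sem); err != nil {
		panic(err)
	}
}
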
diff --git a/libpod/lock/locks.go b/libpod/lock/locks.go
index c61bd78d2..4d7c26aa2 100644
--- a/libpod/lock/locks.go
+++ b/libpod/lock/locks.go
@@ -50,7 +50,7 @@ func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with and be a multiple of the lock bitmap size (default
-// 32)
+// 32).
func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
if numLocks%bitmapSize != 0 || numLocks == 0 {
return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
@@ -95,7 +95,9 @@ func (locks *SHMLocks) Close() error {
// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
-// Returns the index of the semaphore that was allocated
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
if !locks.valid {
return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
@@ -110,8 +112,9 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
return uint32(retCode), nil
}
-// DeallocateSemaphore frees a semaphore in a shared-memory segment for use by
-// a container of pod
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
if !locks.valid {
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
@@ -130,7 +133,13 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
-// LockSemaphore locks the given semaphore
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
if !locks.valid {
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
@@ -149,7 +158,12 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
return nil
}
-// UnlockSemaphore locks the given semaphore
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to unlock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
if !locks.valid {
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
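
The "no requirement that the given semaphore be allocated" rule that both new comments spell out exists for a specific race: the caller looks up a container's lock index, and the container may be deleted before the lock is taken. A hypothetical caller (a sketch, not from this commit; the package name and function are illustrative, and the import path is the one libpod used at the time) would rely on it like this:

// Sketch of the deleted-container race the comments describe.
package lockdemo

import "github.com/containers/libpod/libpod/lock" // assumed import path

func lockPossiblyDeleted(locks *lock.SHMLocks, sem uint32) error {
	// Succeeds even if sem was deallocated after the caller looked it
	// up, because locking does not require the semaphore to be allocated.
	if err := locks.LockSemaphore(sem); err != nil {
		return err
	}
	defer locks.UnlockSemaphore(sem)

	// ... query the database here; the container may prove to be gone ...
	return nil
}
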
diff --git a/libpod/lock/locks_test.go b/libpod/lock/locks_test.go
index 62a36c5da..6d4525f6a 100644
--- a/libpod/lock/locks_test.go
+++ b/libpod/lock/locks_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"syscall"
+ "time"
"testing"
"github.com/stretchr/testify/assert"
@@ -145,3 +146,99 @@ func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
assert.NotEqual(t, sem1, sem2)
})
}
+
+// Test that allocating all locks succeeds and each is unique
+func TestAllocateAllLocksSucceeds(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sems := make(map[uint32]bool)
+ for i := 0; i < numLocks; i++ {
+ sem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Ensure the allocated semaphore is unique
+ _, ok := sems[sem]
+ assert.False(t, ok)
+
+ sems[sem] = true
+ }
+ })
+}
+
+// Test allocating more than the given max fails
+func TestAllocateTooManyLocksFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ for i := 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Try to allocate one more
+ _, err := locks.AllocateSemaphore()
+ assert.Error(t, err)
+ })
+}
+
+// Test that, with all locks allocated, deallocating and reallocating each one
+// succeeds and returns the same semaphore
+func TestAllocateDeallocateCycle(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ for i := 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Now loop through again, deallocating and reallocating.
+ // Each time we free 1 semaphore, allocate again, and make sure
+ // we get the same semaphore back.
+ var j uint32
+ for j = 0; j < numLocks; j++ {
+ err := locks.DeallocateSemaphore(j)
+ assert.NoError(t, err)
+
+ newSem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ assert.Equal(t, j, newSem)
+ }
+ })
+}
+
+// Test that locks actually lock
+func TestLockSemaphoreActuallyLocks(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // This entire test is very ugly - lots of sleeps to try and get
+ // things to occur in the right order.
+ // It also doesn't even exercise the multiprocess nature of the
+ // locks.
+
+ // Get the current time
+ startTime := time.Now()
+
+ // Start a goroutine to take the lock and then release it after
+ // a second.
+ go func() {
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ time.Sleep(1 * time.Second)
+
+ err = locks.UnlockSemaphore(0)
+ assert.NoError(t, err)
+ }()
+
+ // Sleep for a quarter of a second to give the goroutine time
+ // to kick off and grab the lock
+ time.Sleep(250 * time.Millisecond)
+
+ // Take the lock
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ // Get the current time
+ endTime := time.Now()
+
+ // Verify that at least 1 second has passed since start
+ duration := endTime.Sub(startTime)
+ assert.True(t, duration.Seconds() > 1.0)
+ })
+}
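
As the comment at the top of TestLockSemaphoreActuallyLocks admits, the sleeps make the test timing-sensitive. A sleep-independent variant is possible; the sketch below (not part of this commit; the test name is hypothetical, and it assumes locks_test.go additionally imports sync/atomic) uses a channel to order the acquisitions and an atomic flag to prove the second LockSemaphore really blocked until the unlock:

// Sketch: verify mutual exclusion without depending on sleep timing.
func TestLockSemaphoreMutualExclusion(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		held := make(chan struct{})
		var criticalDone int32

		go func() {
			assert.NoError(t, locks.LockSemaphore(0))
			close(held) // the lock is definitely held from here on

			// Only widens the race window; correctness does not
			// depend on this delay.
			time.Sleep(100 * time.Millisecond)

			atomic.StoreInt32(&criticalDone, 1) // strictly before the unlock
			assert.NoError(t, locks.UnlockSemaphore(0))
		}()

		<-held // wait until the goroutine holds the lock

		// This can only return after the goroutine unlocks, so the
		// store above must already have happened.
		assert.NoError(t, locks.LockSemaphore(0))
		assert.Equal(t, int32(1), atomic.LoadInt32(&criticalDone))
		assert.NoError(t, locks.UnlockSemaphore(0))
	})
}
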