From a21f21efa19372e055e8f1daf2e77c52e5352ccc Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Wed, 8 Aug 2018 15:50:16 -0400 Subject: Refactor locks package to build on non-Linux Move SHM specific code into a subpackage. Within the main locks package, move the manager to be linux-only and add a non-Linux unsupported build file. Signed-off-by: Matthew Heon --- libpod/lock/shm/shm_lock.c | 383 +++++++++++++++++++++++++++++++++++++++ libpod/lock/shm/shm_lock.go | 188 +++++++++++++++++++ libpod/lock/shm/shm_lock.h | 43 +++++ libpod/lock/shm/shm_lock_test.go | 243 +++++++++++++++++++++++++ 4 files changed, 857 insertions(+) create mode 100644 libpod/lock/shm/shm_lock.c create mode 100644 libpod/lock/shm/shm_lock.go create mode 100644 libpod/lock/shm/shm_lock.h create mode 100644 libpod/lock/shm/shm_lock_test.go (limited to 'libpod/lock/shm') diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c new file mode 100644 index 000000000..3fe41f63c --- /dev/null +++ b/libpod/lock/shm/shm_lock.c @@ -0,0 +1,383 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "shm_lock.h" + +// Compute the size of the SHM struct +size_t compute_shm_size(uint32_t num_bitmaps) { + return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t)); +} + +// Set up an SHM segment holding locks for libpod. +// num_locks must be a multiple of BITMAP_SIZE (32 by default). +// Returns a valid pointer on success or NULL on error. +// If an error occurs, it will be written to the int pointed to by error_code. +shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) { + int shm_fd, i, j, ret_code; + uint32_t num_bitmaps; + size_t shm_size; + shm_struct_t *shm; + + // If error_code doesn't point to anything, we can't reasonably return errors + // So fail immediately + if (error_code == NULL) { + return NULL; + } + + // We need a nonzero number of locks + if (num_locks == 0) { + *error_code = EINVAL; + return NULL; + } + + // Calculate the number of bitmaps required + if (num_locks % BITMAP_SIZE != 0) { + // Number of locks not a multiple of BITMAP_SIZE + *error_code = EINVAL; + return NULL; + } + num_bitmaps = num_locks / BITMAP_SIZE; + + // Calculate size of the shm segment + shm_size = compute_shm_size(num_bitmaps); + + // Create a new SHM segment for us + shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600); + if (shm_fd < 0) { + *error_code = errno; + return NULL; + } + + // Increase its size to what we need + ret_code = ftruncate(shm_fd, shm_size); + if (ret_code < 0) { + *error_code = errno; + goto CLEANUP_UNLINK; + } + + // Map the shared memory in + shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (shm == MAP_FAILED) { + *error_code = errno; + goto CLEANUP_UNLINK; + } + + // We have successfully mapped the memory, now initialize the region + shm->magic = MAGIC; + shm->num_locks = num_locks; + shm->num_bitmaps = num_bitmaps; + + // Initialize the semaphore that protects the bitmaps. + // Initialize to value 1, as we're a mutex, and set pshared as this will be + // shared between processes in an SHM. 
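+  // (sem_init's second argument is the pshared flag: nonzero makes the
+  // semaphore usable by any process that can map this memory, and an
+  // initial value of 1 gives mutex semantics.)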
+ ret_code = sem_init(&(shm->segment_lock), true, 1); + if (ret_code < 0) { + *error_code = errno; + goto CLEANUP_UNMAP; + } + + // Initialize all bitmaps to 0 initially + // And initialize all semaphores they use + for (i = 0; i < num_bitmaps; i++) { + shm->locks[i].bitmap = 0; + for (j = 0; j < BITMAP_SIZE; j++) { + // As above, initialize to 1 to act as a mutex, and set pshared as we'll + // be living in an SHM. + ret_code = sem_init(&(shm->locks[i].locks[j]), true, 1); + if (ret_code < 0) { + *error_code = errno; + goto CLEANUP_UNMAP; + } + } + } + + // Close the file descriptor, we're done with it + // Ignore errors, it's ok if we leak a single FD and this should only run once + close(shm_fd); + + return shm; + + // Cleanup after an error + CLEANUP_UNMAP: + munmap(shm, shm_size); + CLEANUP_UNLINK: + close(shm_fd); + shm_unlink(SHM_NAME); + return NULL; +} + +// Open an existing SHM segment holding libpod locks. +// num_locks is the number of locks that will be configured in the SHM segment. +// num_locks must be a multiple of BITMAP_SIZE (32 by default). +// Returns a valid pointer on success or NULL on error. +// If an error occurs, it will be written to the int pointed to by error_code. +shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) { + int shm_fd; + shm_struct_t *shm; + size_t shm_size; + uint32_t num_bitmaps; + + if (error_code == NULL) { + return NULL; + } + + // We need a nonzero number of locks + if (num_locks == 0) { + *error_code = EINVAL; + return NULL; + } + + // Calculate the number of bitmaps required + if (num_locks % BITMAP_SIZE != 0) { + // Number of locks not a multiple of BITMAP_SIZE + *error_code = EINVAL; + return NULL; + } + num_bitmaps = num_locks / BITMAP_SIZE; + + // Calculate size of the shm segment + shm_size = compute_shm_size(num_bitmaps); + + shm_fd = shm_open(SHM_NAME, O_RDWR, 0600); + if (shm_fd < 0) { + return NULL; + } + + // Map the shared memory in + shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (shm == MAP_FAILED) { + *error_code = errno; + } + + // Ignore errors, it's ok if we leak a single FD since this only runs once + close(shm_fd); + + // Check if we successfully mmap'd + if (shm == MAP_FAILED) { + return NULL; + } + + // Need to check the SHM to see if it's actually our locks + if (shm->magic != MAGIC) { + *error_code = errno; + goto CLEANUP; + } + if (shm->num_locks != num_locks) { + *error_code = errno; + goto CLEANUP; + } + + return shm; + + CLEANUP: + munmap(shm, shm_size); + return NULL; +} + +// Close an open SHM lock struct, unmapping the backing memory. +// The given shm_struct_t will be rendered unusable as a result. +// On success, 0 is returned. On failure, negative ERRNO values are returned. +int32_t close_lock_shm(shm_struct_t *shm) { + int ret_code; + size_t shm_size; + + // We can't unmap null... 
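+  // (munmap() only removes this process's mapping; the segment itself
+  // persists until shm_unlink() is called on its name.)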
+ if (shm == NULL) { + return -1 * EINVAL; + } + + shm_size = compute_shm_size(shm->num_bitmaps); + + ret_code = munmap(shm, shm_size); + + if (ret_code != 0) { + return -1 * errno; + } + + return 0; +} + +// Allocate the first available semaphore +// Returns a positive integer guaranteed to be less than UINT32_MAX on success, +// or negative errno values on failure +// On sucess, the returned integer is the number of the semaphore allocated +int64_t allocate_semaphore(shm_struct_t *shm) { + int ret_code, i; + bitmap_t test_map; + int64_t sem_number, num_within_bitmap; + + if (shm == NULL) { + return -1 * EINVAL; + } + + // Lock the semaphore controlling access to our shared memory + do { + ret_code = sem_wait(&(shm->segment_lock)); + } while(ret_code == EINTR); + if (ret_code != 0) { + return -1 * errno; + } + + // Loop through our bitmaps to search for one that is not full + for (i = 0; i < shm->num_bitmaps; i++) { + if (shm->locks[i].bitmap != 0xFFFFFFFF) { + test_map = 0x1; + num_within_bitmap = 0; + while (test_map != 0) { + if ((test_map & shm->locks[i].bitmap) == 0) { + // Compute the number of the semaphore we are allocating + sem_number = (BITMAP_SIZE * i) + num_within_bitmap; + // OR in the bitmap + shm->locks[i].bitmap = shm->locks[i].bitmap | test_map; + // Clear the semaphore + sem_post(&(shm->segment_lock)); + // Return the semaphore we've allocated + return sem_number; + } + test_map = test_map << 1; + num_within_bitmap++; + } + // We should never fall through this loop + // TODO maybe an assert() here to panic if we do? + } + } + + // Post to the semaphore to clear the lock + sem_post(&(shm->segment_lock)); + + // All bitmaps are full + // We have no available semaphores, report allocation failure + return -1 * ENOSPC; +} + +// Deallocate a given semaphore +// Returns 0 on success, negative ERRNO values on failure +int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) { + bitmap_t test_map; + int bitmap_index, index_in_bitmap, ret_code, i; + + if (shm == NULL) { + return -1 * EINVAL; + } + + // Check if the lock index is valid + if (sem_index >= shm->num_locks) { + return -1 * EINVAL; + } + + bitmap_index = sem_index / BITMAP_SIZE; + index_in_bitmap = sem_index % BITMAP_SIZE; + + // This should never happen if the sem_index test above succeeded, but better + // safe than sorry + if (bitmap_index >= shm->num_bitmaps) { + return -1 * EFAULT; + } + + test_map = 0x1; + for (i = 0; i < index_in_bitmap; i++) { + test_map = test_map << 1; + } + + // Lock the semaphore controlling access to our shared memory + do { + ret_code = sem_wait(&(shm->segment_lock)); + } while(ret_code == EINTR); + if (ret_code != 0) { + return -1 * errno; + } + + // Check if the semaphore is allocated + if ((test_map & shm->locks[bitmap_index].bitmap) == 0) { + // Post to the semaphore to clear the lock + sem_post(&(shm->segment_lock)); + + return -1 * ENOENT; + } + + // The semaphore is allocated, clear it + // Invert the bitmask we used to test to clear the bit + test_map = ~test_map; + shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map; + + // Post to the semaphore to clear the lock + sem_post(&(shm->segment_lock)); + + return 0; +} + +// Lock a given semaphore +// Does not check if the semaphore is allocated - this ensures that, even for +// removed containers, we can still successfully lock to check status (and +// subsequently realize they have been removed). 
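+// This call blocks in sem_wait() until the semaphore can be acquired.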
+// Returns 0 on success, -1 on failure +int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { + int bitmap_index, index_in_bitmap, ret_code; + + if (shm == NULL) { + return -1 * EINVAL; + } + + if (sem_index >= shm->num_locks) { + return -1 * EINVAL; + } + + bitmap_index = sem_index / BITMAP_SIZE; + index_in_bitmap = sem_index % BITMAP_SIZE; + + // Lock the semaphore controlling access to our shared memory + do { + ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap])); + } while(ret_code == EINTR); + if (ret_code != 0) { + return -1 * errno; + } + + return 0; +} + +// Unlock a given semaphore +// Does not check if the semaphore is allocated - this ensures that, even for +// removed containers, we can still successfully lock to check status (and +// subsequently realize they have been removed). +// Returns 0 on success, -1 on failure +int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) { + int bitmap_index, index_in_bitmap, ret_code; + unsigned int sem_value = 0; + + if (shm == NULL) { + return -1 * EINVAL; + } + + if (sem_index >= shm->num_locks) { + return -1 * EINVAL; + } + + bitmap_index = sem_index / BITMAP_SIZE; + index_in_bitmap = sem_index % BITMAP_SIZE; + + // Only allow a post if the semaphore is less than 1 (locked) + // This allows us to preserve mutex behavior + ret_code = sem_getvalue(&(shm->locks[bitmap_index].locks[index_in_bitmap]), &sem_value); + if (ret_code != 0) { + return -1 * errno; + } + if (sem_value >= 1) { + return -1 * EBUSY; + } + + ret_code = sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap])); + if (ret_code != 0) { + return -1 * errno; + } + + return 0; +} diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go new file mode 100644 index 000000000..ff9b0ce2c --- /dev/null +++ b/libpod/lock/shm/shm_lock.go @@ -0,0 +1,188 @@ +package shm + +// #cgo LDFLAGS: -lrt -lpthread +// #include "shm_lock.h" +// const uint32_t bitmap_size_c = BITMAP_SIZE; +import "C" + +import ( + "syscall" + + "github.com/pkg/errors" +) + +var ( + bitmapSize uint32 = uint32(C.bitmap_size_c) +) + +// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory +// segment +type SHMLocks struct { + lockStruct *C.shm_struct_t + valid bool + maxLocks uint32 +} + +// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX +// semaphores, and returns a struct that can be used to operate on those locks. +// numLocks must be a multiple of the lock bitmap size (by default, 32). +func CreateSHMLock(numLocks uint32) (*SHMLocks, error) { + if numLocks%bitmapSize != 0 || numLocks == 0 { + return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) + } + + locks := new(SHMLocks) + + var errCode C.int + lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode) + if lockStruct == nil { + // We got a null pointer, so something errored + return nil, syscall.Errno(-1 * errCode) + } + + locks.lockStruct = lockStruct + locks.maxLocks = numLocks + locks.valid = true + + return locks, nil +} + +// OpenSHMLock opens an existing shared-memory segment holding a given number of +// POSIX semaphores. numLocks must match the number of locks the shared memory +// segment was created with and be a multiple of the lock bitmap size (default +// 32). 
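+// The segment must already exist; it is typically created by another process
+// via CreateSHMLock.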
+func OpenSHMLock(numLocks uint32) (*SHMLocks, error) { + if numLocks%bitmapSize != 0 || numLocks == 0 { + return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) + } + + locks := new(SHMLocks) + + var errCode C.int + lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode) + if lockStruct == nil { + // We got a null pointer, so something errored + return nil, syscall.Errno(-1 * errCode) + } + + locks.lockStruct = lockStruct + locks.maxLocks = numLocks + locks.valid = true + + return locks, nil +} + +// GetMaxLocks returns the maximum number of locks in the SHM +func (locks *SHMLocks) GetMaxLocks() uint32 { + return locks.maxLocks +} + +// Close closes an existing shared-memory segment. +// The segment will be rendered unusable after closing. +// WARNING: If you Close() while there are still locks locked, these locks may +// fail to release, causing a program freeze. +// Close() is only intended to be used while testing the locks. +func (locks *SHMLocks) Close() error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + locks.valid = false + + retCode := C.close_lock_shm(locks.lockStruct) + if retCode < 0 { + // Negative errno returned + return syscall.Errno(-1 * retCode) + } + + return nil +} + +// AllocateSemaphore allocates a semaphore from a shared-memory segment for use +// by a container or pod. +// Returns the index of the semaphore that was allocated. +// Allocations past the maximum number of locks given when the SHM segment was +// created will result in an error, and no semaphore will be allocated. +func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { + if !locks.valid { + return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + retCode := C.allocate_semaphore(locks.lockStruct) + if retCode < 0 { + // Negative errno returned + return 0, syscall.Errno(-1 * retCode) + } + + return uint32(retCode), nil +} + +// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be +// reallocated to another container or pod. +// The given semaphore must be already allocated, or an error will be returned. +func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + if sem > locks.maxLocks { + return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) + } + + retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem)) + if retCode < 0 { + // Negative errno returned + return syscall.Errno(-1 * retCode) + } + + return nil +} + +// LockSemaphore locks the given semaphore. +// If the semaphore is already locked, LockSemaphore will block until the lock +// can be acquired. +// There is no requirement that the given semaphore be allocated. +// This ensures that attempts to lock a container after it has been deleted, +// but before the caller has queried the database to determine this, will +// succeed. 
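+// The lock is shared across processes: any process that has opened the same
+// SHM segment contends on the same underlying semaphore.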
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	if sem > locks.maxLocks {
+		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+	}
+
+	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	if sem > locks.maxLocks {
+		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+	}
+
+	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
+	if retCode < 0 {
+		// Negative errno returned
+		return syscall.Errno(-1 * retCode)
+	}
+
+	return nil
+}
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
new file mode 100644
index 000000000..18bea47e9
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.h
@@ -0,0 +1,43 @@
+#ifndef shm_locks_h_
+#define shm_locks_h_
+
+#include <semaphore.h>
+#include <stdint.h>
+
+// Magic number to ensure we open the right SHM segment
+#define MAGIC 0xA5A5
+
+// Name of the SHM
+#define SHM_NAME "/libpod_lock"
+
+// Type for our bitmaps
+typedef uint32_t bitmap_t;
+
+// bitmap size
+#define BITMAP_SIZE (sizeof(bitmap_t) * 8)
+
+// Struct to hold a single bitmap and associated locks
+typedef struct lock_group {
+  bitmap_t bitmap;
+  sem_t locks[BITMAP_SIZE];
+} lock_group_t;
+
+// Struct to hold our SHM locks
+typedef struct shm_struct {
+  uint16_t magic;
+  sem_t segment_lock;
+  uint32_t num_bitmaps;
+  uint32_t num_locks;
+  lock_group_t locks[];
+} shm_struct_t;
+
+size_t compute_shm_size(uint32_t num_bitmaps);
+shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code);
+shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code);
+int32_t close_lock_shm(shm_struct_t *shm);
+int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+
+#endif
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
new file mode 100644
index 000000000..bc22db835
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -0,0 +1,243 @@
+package shm
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// All tests here are in the same process, which somewhat limits their utility.
+// The big intent of this package is multiprocess locking, which is really hard
+// to test without actually having multiple processes...
+// We can at least verify that the locks work within the local process.
+ +// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps +const numLocks = 128 + +// We need a test main to ensure that the SHM is created before the tests run +func TestMain(m *testing.M) { + shmLock, err := CreateSHMLock(numLocks) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err) + os.Exit(-1) + } + + // Close the SHM - every subsequent test will reopen + if err := shmLock.Close(); err != nil { + fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err) + os.Exit(-1) + } + + exitCode := m.Run() + + // We need to remove the SHM segment to clean up after ourselves + os.RemoveAll("/dev/shm/libpod_lock") + + os.Exit(exitCode) +} + +func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) { + locks, err := OpenSHMLock(numLocks) + if err != nil { + t.Fatalf("Error opening locks: %v", err) + } + defer func() { + // Unlock and deallocate all locks + // Ignore EBUSY (lock is already unlocked) + // Ignore ENOENT (lock is not allocated) + var i uint32 + for i = 0; i < numLocks; i++ { + if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY { + t.Fatalf("Error unlocking semaphore %d: %v", i, err) + } + if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT { + t.Fatalf("Error deallocating semaphore %d: %v", i, err) + } + } + + if err := locks.Close(); err != nil { + t.Fatalf("Error closing locks: %v", err) + } + }() + + success := t.Run("locks", func(t *testing.T) { + testFunc(t, locks) + }) + if !success { + t.Fail() + } +} + +// Test that creating an SHM with a bad size fails +func TestCreateNewSHMBadSize(t *testing.T) { + // Odd number, not a power of 2, should never be a word size on a system + _, err := CreateSHMLock(7) + assert.Error(t, err) +} + +// Test that creating an SHM with 0 size fails +func TestCreateNewSHMZeroSize(t *testing.T) { + _, err := CreateSHMLock(0) + assert.Error(t, err) +} + +// Test that deallocating an unallocated lock errors +func TestDeallocateUnallocatedLockErrors(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + err := locks.DeallocateSemaphore(0) + assert.Error(t, err) + }) +} + +// Test that unlocking an unlocked lock fails +func TestUnlockingUnlockedLockFails(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + err := locks.UnlockSemaphore(0) + assert.Error(t, err) + }) +} + +// Test that locking and double-unlocking fails +func TestDoubleUnlockFails(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + err := locks.LockSemaphore(0) + assert.NoError(t, err) + + err = locks.UnlockSemaphore(0) + assert.NoError(t, err) + + err = locks.UnlockSemaphore(0) + assert.Error(t, err) + }) +} + +// Test allocating - lock - unlock - deallocate cycle, single lock +func TestLockLifecycleSingleLock(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + sem, err := locks.AllocateSemaphore() + require.NoError(t, err) + + err = locks.LockSemaphore(sem) + assert.NoError(t, err) + + err = locks.UnlockSemaphore(sem) + assert.NoError(t, err) + + err = locks.DeallocateSemaphore(sem) + assert.NoError(t, err) + }) +} + +// Test allocate two locks returns different locks +func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + sem1, err := locks.AllocateSemaphore() + assert.NoError(t, err) + + sem2, err := locks.AllocateSemaphore() + assert.NoError(t, err) + + assert.NotEqual(t, sem1, sem2) + }) +} + +// Test allocate all locks successful and all are unique +func 
TestAllocateAllLocksSucceeds(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + sems := make(map[uint32]bool) + for i := 0; i < numLocks; i++ { + sem, err := locks.AllocateSemaphore() + assert.NoError(t, err) + + // Ensure the allocate semaphore is unique + _, ok := sems[sem] + assert.False(t, ok) + + sems[sem] = true + } + }) +} + +// Test allocating more than the given max fails +func TestAllocateTooManyLocksFails(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + // Allocate all locks + for i := 0; i < numLocks; i++ { + _, err := locks.AllocateSemaphore() + assert.NoError(t, err) + } + + // Try and allocate one more + _, err := locks.AllocateSemaphore() + assert.Error(t, err) + }) +} + +// Test allocating max locks, deallocating one, and then allocating again succeeds +func TestAllocateDeallocateCycle(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + // Allocate all locks + for i := 0; i < numLocks; i++ { + _, err := locks.AllocateSemaphore() + assert.NoError(t, err) + } + + // Now loop through again, deallocating and reallocating. + // Each time we free 1 semaphore, allocate again, and make sure + // we get the same semaphore back. + var j uint32 + for j = 0; j < numLocks; j++ { + err := locks.DeallocateSemaphore(j) + assert.NoError(t, err) + + newSem, err := locks.AllocateSemaphore() + assert.NoError(t, err) + assert.Equal(t, j, newSem) + } + }) +} + +// Test that locks actually lock +func TestLockSemaphoreActuallyLocks(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + // This entire test is very ugly - lots of sleeps to try and get + // things to occur in the right order. + // It also doesn't even exercise the multiprocess nature of the + // locks. + + // Get the current time + startTime := time.Now() + + // Start a goroutine to take the lock and then release it after + // a second. + go func() { + err := locks.LockSemaphore(0) + assert.NoError(t, err) + + time.Sleep(1 * time.Second) + + err = locks.UnlockSemaphore(0) + assert.NoError(t, err) + }() + + // Sleep for a quarter of a second to give the goroutine time + // to kick off and grab the lock + time.Sleep(250 * time.Millisecond) + + // Take the lock + err := locks.LockSemaphore(0) + assert.NoError(t, err) + + // Get the current time + endTime := time.Now() + + // Verify that at least 1 second has passed since start + duration := endTime.Sub(startTime) + assert.True(t, duration.Seconds() > 1.0) + }) +} -- cgit v1.2.3-54-g00ecf From f38fccb48c9acc2b7d55c1746c9e6dbde492cff5 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Wed, 8 Aug 2018 16:38:38 -0400 Subject: Disable lint on SHMLock struct Golint wants to rename the struct. I think the name is fine. I can disable golint. Golint will no longer complain about the name. Signed-off-by: Matthew Heon --- libpod/lock/shm/shm_lock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'libpod/lock/shm') diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index ff9b0ce2c..9a9074c04 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -16,8 +16,8 @@ var ( ) // SHMLocks is a struct enabling POSIX semaphore locking in a shared memory -// segment -type SHMLocks struct { +// segment. 
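+// (The nolint directive below suppresses golint's complaint that
+// shm.SHMLocks stutters when referenced from other packages.)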
+type SHMLocks struct { // nolint lockStruct *C.shm_struct_t valid bool maxLocks uint32 -- cgit v1.2.3-54-g00ecf From e73484c176839b2f2adf3d07cc09222a7b75bf69 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Fri, 10 Aug 2018 13:46:07 -0400 Subject: Move to POSIX mutexes for SHM locks Signed-off-by: Matthew Heon --- libpod/lock/lock.go | 8 +- libpod/lock/shm/shm_lock.c | 245 ++++++++++++++++++++++------------ libpod/lock/shm/shm_lock.go | 52 +++++--- libpod/lock/shm/shm_lock.h | 35 ++--- libpod/lock/shm/shm_lock_test.go | 56 ++++++-- libpod/lock/shm_lock_manager_linux.go | 42 ++---- 6 files changed, 269 insertions(+), 169 deletions(-) (limited to 'libpod/lock/shm') diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go index 6d17828f6..5258c641f 100644 --- a/libpod/lock/lock.go +++ b/libpod/lock/lock.go @@ -23,7 +23,7 @@ type Manager interface { // RetrieveLock retrieves a lock given its UUID. // The underlying lock MUST be the same as another other lock with the // same UUID. - RetrieveLock(id string) (Locker, error) + RetrieveLock(id uint32) (Locker, error) } // Locker is similar to sync.Locker, but provides a method for freeing the lock @@ -37,7 +37,7 @@ type Locker interface { // ID is guaranteed to uniquely identify the lock within the // Manager - that is, calling RetrieveLock with this ID will return // another instance of the same lock. - ID() string + ID() uint32 // Lock locks the lock. // This call MUST block until it successfully acquires the lock or // encounters a fatal error. @@ -46,8 +46,8 @@ type Locker interface { // A call to Unlock() on a lock that is already unlocked lock MUST // error. Unlock() error - // Deallocate deallocates the underlying lock, allowing its reuse by - // other pods and containers. + // Free deallocates the underlying lock, allowing its reuse by other + // pods and containers. // The lock MUST still be usable after a Free() - some libpod instances // may still retain Container structs with the old lock. This simply // advises the manager that the lock may be reallocated. diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c index 3fe41f63c..4af58d857 100644 --- a/libpod/lock/shm/shm_lock.c +++ b/libpod/lock/shm/shm_lock.c @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -12,19 +12,67 @@ #include "shm_lock.h" // Compute the size of the SHM struct -size_t compute_shm_size(uint32_t num_bitmaps) { +static size_t compute_shm_size(uint32_t num_bitmaps) { return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t)); } +// Take the given mutex. +// Handles exceptional conditions, including a mutex locked by a process that +// died holding it. +// Returns 0 on success, or positive errno on failure. +static int take_mutex(pthread_mutex_t *mutex) { + int ret_code; + + do { + ret_code = pthread_mutex_lock(mutex); + } while(ret_code == EAGAIN); + + if (ret_code == EOWNERDEAD) { + // The previous owner of the mutex died while holding it + // Take it for ourselves + ret_code = pthread_mutex_consistent(mutex); + if (ret_code != 0) { + // Someone else may have gotten here first and marked the state consistent + // However, the mutex could also be invalid. + // Fail here instead of looping back to trying to lock the mutex. + return ret_code; + } + } else if (ret_code != 0) { + return ret_code; + } + + return 0; +} + +// Release the given mutex. +// Returns 0 on success, or positive errno on failure. 
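+// (Note that the pthread_mutex_* functions return the error number directly
+// rather than setting errno, so ret_code is passed through unchanged.)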
+static int release_mutex(pthread_mutex_t *mutex) {
+  int ret_code;
+
+  do {
+    ret_code = pthread_mutex_unlock(mutex);
+  } while(ret_code == EAGAIN);
+
+  if (ret_code != 0) {
+    return ret_code;
+  }
+
+  return 0;
+}
+
 // Set up an SHM segment holding locks for libpod.
-// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// num_locks must not be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// contain no other / characters, and be at most 255 characters including
+// terminating NULL byte.
 // Returns a valid pointer on success or NULL on error.
-// If an error occurs, it will be written to the int pointed to by error_code.
-shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
+// If an error occurs, negative ERRNO values will be written to error_code.
+shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
   int shm_fd, i, j, ret_code;
   uint32_t num_bitmaps;
   size_t shm_size;
   shm_struct_t *shm;
+  pthread_mutexattr_t attr;
 
   // If error_code doesn't point to anything, we can't reasonably return errors
   // So fail immediately
@@ -34,67 +82,93 @@ shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
 
   // We need a nonzero number of locks
   if (num_locks == 0) {
-    *error_code = EINVAL;
+    *error_code = -1 * EINVAL;
     return NULL;
   }
 
-  // Calculate the number of bitmaps required
-  if (num_locks % BITMAP_SIZE != 0) {
-    // Number of locks not a multiple of BITMAP_SIZE
-    *error_code = EINVAL;
+  if (path == NULL) {
+    *error_code = -1 * EINVAL;
     return NULL;
   }
+
+  // Calculate the number of bitmaps required
   num_bitmaps = num_locks / BITMAP_SIZE;
+  if (num_locks % BITMAP_SIZE != 0) {
+    // The actual number given is not an even multiple of our bitmap size
+    // So round up
+    num_bitmaps += 1;
+  }
 
   // Calculate size of the shm segment
   shm_size = compute_shm_size(num_bitmaps);
 
   // Create a new SHM segment for us
-  shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600);
+  shm_fd = shm_open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
   if (shm_fd < 0) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     return NULL;
   }
 
   // Increase its size to what we need
   ret_code = ftruncate(shm_fd, shm_size);
   if (ret_code < 0) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     goto CLEANUP_UNLINK;
   }
 
   // Map the shared memory in
   shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
   if (shm == MAP_FAILED) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     goto CLEANUP_UNLINK;
   }
 
   // We have successfully mapped the memory, now initialize the region
   shm->magic = MAGIC;
-  shm->num_locks = num_locks;
+  shm->unused = 0;
+  shm->num_locks = num_bitmaps * BITMAP_SIZE;
   shm->num_bitmaps = num_bitmaps;
 
-  // Initialize the semaphore that protects the bitmaps.
-  // Initialize to value 1, as we're a mutex, and set pshared as this will be
-  // shared between processes in an SHM.
-  ret_code = sem_init(&(shm->segment_lock), true, 1);
-  if (ret_code < 0) {
-    *error_code = errno;
+  // Create an initializer for our pthread mutexes
+  ret_code = pthread_mutexattr_init(&attr);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
     goto CLEANUP_UNMAP;
   }
 
+  // Set mutexes to pshared - multiprocess-safe
+  ret_code = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
+  // Set mutexes to robust - if a process dies while holding a mutex, we'll get
+  // a special error code on the next attempt to lock it.
+  // This should prevent panicking processes from leaving the state unusable.
+  ret_code = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
+  // Initialize the mutex that protects the bitmaps using the mutex attributes
+  ret_code = pthread_mutex_init(&(shm->segment_lock), &attr);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
   // Initialize all bitmaps to 0 initially
   // And initialize all semaphores they use
   for (i = 0; i < num_bitmaps; i++) {
     shm->locks[i].bitmap = 0;
     for (j = 0; j < BITMAP_SIZE; j++) {
-      // As above, initialize to 1 to act as a mutex, and set pshared as we'll
-      // be living in an SHM.
-      ret_code = sem_init(&(shm->locks[i].locks[j]), true, 1);
-      if (ret_code < 0) {
-        *error_code = errno;
-        goto CLEANUP_UNMAP;
+      // Initialize each mutex
+      ret_code = pthread_mutex_init(&(shm->locks[i].locks[j]), &attr);
+      if (ret_code != 0) {
+        *error_code = -1 * ret_code;
+        goto CLEANUP_FREEATTR;
       }
     }
   }
@@ -103,23 +177,33 @@ shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
   // Ignore errors, it's ok if we leak a single FD and this should only run once
   close(shm_fd);
 
+  // Destroy the pthread initializer attribute.
+  // Again, ignore errors, this will only run once and we might leak a tiny bit
+  // of memory at worst.
+  pthread_mutexattr_destroy(&attr);
+
   return shm;
 
   // Cleanup after an error
+  CLEANUP_FREEATTR:
+  pthread_mutexattr_destroy(&attr);
   CLEANUP_UNMAP:
   munmap(shm, shm_size);
   CLEANUP_UNLINK:
   close(shm_fd);
-  shm_unlink(SHM_NAME);
+  shm_unlink(path);
   return NULL;
 }
 
 // Open an existing SHM segment holding libpod locks.
 // num_locks is the number of locks that will be configured in the SHM segment.
-// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// num_locks cannot be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// contain no other / characters, and be at most 255 characters including
+// terminating NULL byte.
 // Returns a valid pointer on success or NULL on error.
-// If an error occurs, it will be written to the int pointed to by error_code.
+// If an error occurs, negative ERRNO values will be written to error_code.
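+// The segment's stored lock count must equal num_locks rounded up to a full
+// bitmap, or the open is rejected.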
+shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) { int shm_fd; shm_struct_t *shm; size_t shm_size; @@ -131,30 +215,34 @@ shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) { // We need a nonzero number of locks if (num_locks == 0) { - *error_code = EINVAL; + *error_code = -1 * EINVAL; return NULL; } - // Calculate the number of bitmaps required - if (num_locks % BITMAP_SIZE != 0) { - // Number of locks not a multiple of BITMAP_SIZE - *error_code = EINVAL; + if (path == NULL) { + *error_code = -1 * EINVAL; return NULL; } + + // Calculate the number of bitmaps required num_bitmaps = num_locks / BITMAP_SIZE; + if (num_locks % BITMAP_SIZE != 0) { + num_bitmaps += 1; + } // Calculate size of the shm segment shm_size = compute_shm_size(num_bitmaps); - shm_fd = shm_open(SHM_NAME, O_RDWR, 0600); + shm_fd = shm_open(path, O_RDWR, 0600); if (shm_fd < 0) { + *error_code = -1 * errno; return NULL; } // Map the shared memory in shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); if (shm == MAP_FAILED) { - *error_code = errno; + *error_code = -1 * errno; } // Ignore errors, it's ok if we leak a single FD since this only runs once @@ -167,11 +255,11 @@ shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) { // Need to check the SHM to see if it's actually our locks if (shm->magic != MAGIC) { - *error_code = errno; + *error_code = -1 * errno; goto CLEANUP; } - if (shm->num_locks != num_locks) { - *error_code = errno; + if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) { + *error_code = -1 * errno; goto CLEANUP; } @@ -219,11 +307,9 @@ int64_t allocate_semaphore(shm_struct_t *shm) { } // Lock the semaphore controlling access to our shared memory - do { - ret_code = sem_wait(&(shm->segment_lock)); - } while(ret_code == EINTR); + ret_code = take_mutex(&(shm->segment_lock)); if (ret_code != 0) { - return -1 * errno; + return -1 * ret_code; } // Loop through our bitmaps to search for one that is not full @@ -237,8 +323,13 @@ int64_t allocate_semaphore(shm_struct_t *shm) { sem_number = (BITMAP_SIZE * i) + num_within_bitmap; // OR in the bitmap shm->locks[i].bitmap = shm->locks[i].bitmap | test_map; - // Clear the semaphore - sem_post(&(shm->segment_lock)); + + // Clear the mutex + ret_code = release_mutex(&(shm->segment_lock)); + if (ret_code != 0) { + return -1 * ret_code; + } + // Return the semaphore we've allocated return sem_number; } @@ -250,8 +341,11 @@ int64_t allocate_semaphore(shm_struct_t *shm) { } } - // Post to the semaphore to clear the lock - sem_post(&(shm->segment_lock)); + // Clear the mutex + ret_code = release_mutex(&(shm->segment_lock)); + if (ret_code != 0) { + return -1 * ret_code; + } // All bitmaps are full // We have no available semaphores, report allocation failure @@ -282,23 +376,20 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) { return -1 * EFAULT; } - test_map = 0x1; - for (i = 0; i < index_in_bitmap; i++) { - test_map = test_map << 1; - } + test_map = 0x1 << index_in_bitmap; - // Lock the semaphore controlling access to our shared memory - do { - ret_code = sem_wait(&(shm->segment_lock)); - } while(ret_code == EINTR); + // Lock the mutex controlling access to our shared memory + ret_code = take_mutex(&(shm->segment_lock)); if (ret_code != 0) { - return -1 * errno; + return -1 * ret_code; } // Check if the semaphore is allocated if ((test_map & shm->locks[bitmap_index].bitmap) == 0) { - // Post to the semaphore to clear the lock - sem_post(&(shm->segment_lock)); + ret_code = 
release_mutex(&(shm->segment_lock)); + if (ret_code != 0) { + return -1 * ret_code; + } return -1 * ENOENT; } @@ -308,8 +399,10 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) { test_map = ~test_map; shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map; - // Post to the semaphore to clear the lock - sem_post(&(shm->segment_lock)); + ret_code = release_mutex(&(shm->segment_lock)); + if (ret_code != 0) { + return -1 * ret_code; + } return 0; } @@ -333,15 +426,7 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { bitmap_index = sem_index / BITMAP_SIZE; index_in_bitmap = sem_index % BITMAP_SIZE; - // Lock the semaphore controlling access to our shared memory - do { - ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap])); - } while(ret_code == EINTR); - if (ret_code != 0) { - return -1 * errno; - } - - return 0; + return -1 * take_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap])); } // Unlock a given semaphore @@ -351,7 +436,6 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { // Returns 0 on success, -1 on failure int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) { int bitmap_index, index_in_bitmap, ret_code; - unsigned int sem_value = 0; if (shm == NULL) { return -1 * EINVAL; @@ -364,20 +448,5 @@ int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) { bitmap_index = sem_index / BITMAP_SIZE; index_in_bitmap = sem_index % BITMAP_SIZE; - // Only allow a post if the semaphore is less than 1 (locked) - // This allows us to preserve mutex behavior - ret_code = sem_getvalue(&(shm->locks[bitmap_index].locks[index_in_bitmap]), &sem_value); - if (ret_code != 0) { - return -1 * errno; - } - if (sem_value >= 1) { - return -1 * EBUSY; - } - - ret_code = sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap])); - if (ret_code != 0) { - return -1 * errno; - } - - return 0; + return -1 * release_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap])); } diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index 9a9074c04..16d7f2008 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -1,47 +1,54 @@ package shm // #cgo LDFLAGS: -lrt -lpthread +// #include // #include "shm_lock.h" // const uint32_t bitmap_size_c = BITMAP_SIZE; import "C" import ( + "runtime" "syscall" + "unsafe" "github.com/pkg/errors" ) -var ( - bitmapSize uint32 = uint32(C.bitmap_size_c) +const ( + BitmapSize uint32 = uint32(C.bitmap_size_c) ) // SHMLocks is a struct enabling POSIX semaphore locking in a shared memory // segment. type SHMLocks struct { // nolint lockStruct *C.shm_struct_t - valid bool maxLocks uint32 + valid bool } // CreateSHMLock sets up a shared-memory segment holding a given number of POSIX // semaphores, and returns a struct that can be used to operate on those locks. -// numLocks must be a multiple of the lock bitmap size (by default, 32). -func CreateSHMLock(numLocks uint32) (*SHMLocks, error) { - if numLocks%bitmapSize != 0 || numLocks == 0 { - return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) +// numLocks must not be 0, and may be rounded up to a multiple of the bitmap +// size used by the underlying implementation. 
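+// Creation fails if a segment already exists at path, as the underlying
+// shm_open() is performed with O_CREAT | O_EXCL.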
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+	if numLocks == 0 {
+		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
 	}
 
 	locks := new(SHMLocks)
 
+	cPath := C.CString(path)
+	defer C.free(unsafe.Pointer(cPath))
+
 	var errCode C.int
-	lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode)
+	lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
 	if lockStruct == nil {
 		// We got a null pointer, so something errored
 		return nil, syscall.Errno(-1 * errCode)
 	}
 
 	locks.lockStruct = lockStruct
-	locks.maxLocks = numLocks
+	locks.maxLocks = uint32(lockStruct.num_locks)
 	locks.valid = true
 
 	return locks, nil
@@ -49,17 +56,19 @@ func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
 
 // OpenSHMLock opens an existing shared-memory segment holding a given number of
 // POSIX semaphores. numLocks must match the number of locks the shared memory
-// segment was created with and be a multiple of the lock bitmap size (default
-// 32).
-func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
-	if numLocks%bitmapSize != 0 || numLocks == 0 {
-		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+	if numLocks == 0 {
+		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
 	}
 
 	locks := new(SHMLocks)
 
+	cPath := C.CString(path)
+	defer C.free(unsafe.Pointer(cPath))
+
 	var errCode C.int
-	lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode)
+	lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
 	if lockStruct == nil {
 		// We got a null pointer, so something errored
 		return nil, syscall.Errno(-1 * errCode)
 	}
@@ -108,6 +117,8 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
 		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
 	}
 
+	// This returns a U64, so we have the full u32 range available for
+	// semaphore indexes, and can still return error codes.
 	retCode := C.allocate_semaphore(locks.lockStruct)
 	if retCode < 0 {
 		// Negative errno returned
@@ -154,6 +165,10 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
 		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
 	}
 
+	// For pthread mutexes, we have to guarantee lock and unlock happen in
+	// the same thread.
+	runtime.LockOSThread()
+
 	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
 	if retCode < 0 {
 		// Negative errno returned
@@ -184,5 +199,12 @@ func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
 		return syscall.Errno(-1 * retCode)
 	}
 
+	// For pthread mutexes, we have to guarantee lock and unlock happen in
+	// the same thread.
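+	// (POSIX requires a mutex to be unlocked by the thread that locked it;
+	// goroutines migrate between OS threads unless pinned.)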
+ // OK if we take multiple locks - UnlockOSThread() won't actually unlock + // until the number of calls equals the number of calls to + // LockOSThread() + runtime.UnlockOSThread() + return nil } diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h index 18bea47e9..8e7e23fb7 100644 --- a/libpod/lock/shm/shm_lock.h +++ b/libpod/lock/shm/shm_lock.h @@ -1,14 +1,11 @@ #ifndef shm_locks_h_ #define shm_locks_h_ -#include +#include #include // Magic number to ensure we open the right SHM segment -#define MAGIC 0xA5A5 - -// Name of the SHM -#define SHM_NAME "/libpod_lock" +#define MAGIC 0x87D1 // Type for our bitmaps typedef uint32_t bitmap_t; @@ -18,22 +15,28 @@ typedef uint32_t bitmap_t; // Struct to hold a single bitmap and associated locks typedef struct lock_group { - bitmap_t bitmap; - sem_t locks[BITMAP_SIZE]; + bitmap_t bitmap; + pthread_mutex_t locks[BITMAP_SIZE]; } lock_group_t; -// Struct to hold our SHM locks +// Struct to hold our SHM locks. +// Unused is required to be 0 in the current implementation. If we ever make +// changes to this structure in the future, this will be repurposed as a version +// field. typedef struct shm_struct { - uint16_t magic; - sem_t segment_lock; - uint32_t num_bitmaps; - uint32_t num_locks; - lock_group_t locks[]; + uint16_t magic; + uint16_t unused; + pthread_mutex_t segment_lock; + uint32_t num_bitmaps; + uint32_t num_locks; + lock_group_t locks[]; } shm_struct_t; -size_t compute_shm_size(uint32_t num_bitmaps); -shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code); -shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code); +static size_t compute_shm_size(uint32_t num_bitmaps); +static int take_mutex(pthread_mutex_t *mutex); +static int release_mutex(pthread_mutex_t *mutex); +shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code); +shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code); int32_t close_lock_shm(shm_struct_t *shm); int64_t allocate_semaphore(shm_struct_t *shm); int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index); diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go index bc22db835..7174253d0 100644 --- a/libpod/lock/shm/shm_lock_test.go +++ b/libpod/lock/shm/shm_lock_test.go @@ -3,6 +3,7 @@ package shm import ( "fmt" "os" + "runtime" "syscall" "testing" "time" @@ -17,11 +18,13 @@ import ( // We can at least verify that the locks work within the local process. 
// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps -const numLocks = 128 +const numLocks uint32 = 4 * BitmapSize + +const lockPath = "/libpod_test" // We need a test main to ensure that the SHM is created before the tests run func TestMain(m *testing.M) { - shmLock, err := CreateSHMLock(numLocks) + shmLock, err := CreateSHMLock(lockPath, numLocks) if err != nil { fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err) os.Exit(-1) @@ -42,19 +45,15 @@ func TestMain(m *testing.M) { } func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) { - locks, err := OpenSHMLock(numLocks) + locks, err := OpenSHMLock(lockPath, numLocks) if err != nil { t.Fatalf("Error opening locks: %v", err) } defer func() { - // Unlock and deallocate all locks - // Ignore EBUSY (lock is already unlocked) + // Deallocate all locks // Ignore ENOENT (lock is not allocated) var i uint32 for i = 0; i < numLocks; i++ { - if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY { - t.Fatalf("Error unlocking semaphore %d: %v", i, err) - } if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT { t.Fatalf("Error deallocating semaphore %d: %v", i, err) } @@ -73,16 +72,22 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) { } } -// Test that creating an SHM with a bad size fails -func TestCreateNewSHMBadSize(t *testing.T) { +// Test that creating an SHM with a bad size rounds up to a good size +func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) { // Odd number, not a power of 2, should never be a word size on a system - _, err := CreateSHMLock(7) - assert.Error(t, err) + lock, err := CreateSHMLock("/test1", 7) + assert.NoError(t, err) + + assert.Equal(t, lock.GetMaxLocks(), BitmapSize) + + if err := lock.Close(); err != nil { + t.Fatalf("Error closing locks: %v", err) + } } // Test that creating an SHM with 0 size fails func TestCreateNewSHMZeroSize(t *testing.T) { - _, err := CreateSHMLock(0) + _, err := CreateSHMLock("/test2", 0) assert.Error(t, err) } @@ -241,3 +246,28 @@ func TestLockSemaphoreActuallyLocks(t *testing.T) { assert.True(t, duration.Seconds() > 1.0) }) } + +// Test that locking and unlocking two semaphores succeeds +// Ensures that runtime.LockOSThread() is doing its job +func TestLockAndUnlockTwoSemaphore(t *testing.T) { + runLockTest(t, func(t *testing.T, locks *SHMLocks) { + err := locks.LockSemaphore(0) + assert.NoError(t, err) + + err = locks.LockSemaphore(1) + assert.NoError(t, err) + + err = locks.UnlockSemaphore(1) + assert.NoError(t, err) + + // Now yield scheduling + // To try and get us on another OS thread + runtime.Gosched() + + // And unlock the last semaphore + // If we are in a different OS thread, this should fail. + // However, runtime.UnlockOSThread() should guarantee we are not + err = locks.UnlockSemaphore(0) + assert.NoError(t, err) + }) +} diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go index b1e9df12d..974431a13 100644 --- a/libpod/lock/shm_lock_manager_linux.go +++ b/libpod/lock/shm_lock_manager_linux.go @@ -3,13 +3,7 @@ package lock import ( - "fmt" - "math" - "strconv" - "syscall" - - "github.com/pkg/errors" - "github.com/projectatomic/libpod/libpod/lock/shm" + "github.com/containers/libpod/libpod/lock/shm" ) // SHMLockManager manages shared memory locks. @@ -18,8 +12,8 @@ type SHMLockManager struct { } // NewSHMLockManager makes a new SHMLockManager with the given number of locks. 
-func NewSHMLockManager(numLocks uint32) (Manager, error) { - locks, err := shm.CreateSHMLock(numLocks) +func NewSHMLockManager(path string, numLocks uint32) (Manager, error) { + locks, err := shm.CreateSHMLock(path, numLocks) if err != nil { return nil, err } @@ -32,8 +26,8 @@ func NewSHMLockManager(numLocks uint32) (Manager, error) { // OpenSHMLockManager opens an existing SHMLockManager with the given number of // locks. -func OpenSHMLockManager(numLocks uint32) (Manager, error) { - locks, err := shm.OpenSHMLock(numLocks) +func OpenSHMLockManager(path string, numLocks uint32) (Manager, error) { + locks, err := shm.OpenSHMLock(path, numLocks) if err != nil { return nil, err } @@ -59,27 +53,9 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) { } // RetrieveLock retrieves a lock from the manager given its ID. -func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) { - intID, err := strconv.ParseInt(id, 16, 64) - if err != nil { - return nil, errors.Wrapf(err, "given ID %q is not a valid SHMLockManager ID - cannot be parsed as int", id) - } - - if intID < 0 { - return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - must be positive", id) - } - - if intID > math.MaxUint32 { - return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - too large", id) - } - - var u32ID uint32 = uint32(intID) - if u32ID >= m.locks.GetMaxLocks() { - return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - too large to fit", id) - } - +func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) { lock := new(SHMLock) - lock.lockID = u32ID + lock.lockID = id lock.manager = m return lock, nil @@ -92,8 +68,8 @@ type SHMLock struct { } // ID returns the ID of the lock. -func (l *SHMLock) ID() string { - return fmt.Sprintf("%x", l.lockID) +func (l *SHMLock) ID() uint32 { + return l.lockID } // Lock acquires the lock. -- cgit v1.2.3-54-g00ecf From a364b656eaef1be5329abfd02d3fcd2dbcd37d64 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Thu, 23 Aug 2018 13:48:07 -0400 Subject: Add lock manager to libpod runtime Signed-off-by: Matthew Heon --- libpod/lock/lock.go | 11 ++-- libpod/lock/shm/shm_lock.go | 8 ++- libpod/lock/shm_lock_manager_linux.go | 12 ++-- libpod/runtime.go | 110 ++++++++++++++++++++++------------ 4 files changed, 95 insertions(+), 46 deletions(-) (limited to 'libpod/lock/shm') diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go index 5258c641f..73c1fdcf7 100644 --- a/libpod/lock/lock.go +++ b/libpod/lock/lock.go @@ -41,11 +41,14 @@ type Locker interface { // Lock locks the lock. // This call MUST block until it successfully acquires the lock or // encounters a fatal error. - Lock() error + // All errors must be handled internally, as they are not returned. For + // the most part, panicking should be appropriate. + Lock() // Unlock unlocks the lock. - // A call to Unlock() on a lock that is already unlocked lock MUST - // error. - Unlock() error + // All errors must be handled internally, as they are not returned. For + // the most part, panicking should be appropriate. + // This includes unlocking locks which are already unlocked. + Unlock() // Free deallocates the underlying lock, allowing its reuse by other // pods and containers. 
// The lock MUST still be usable after a Free() - some libpod instances diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index 16d7f2008..3372a8c71 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -12,9 +12,13 @@ import ( "unsafe" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -const ( +var ( + // BitmapSize is the size of the bitmap used when managing SHM locks. + // an SHM lock manager's max locks will be rounded up to a multiple of + // this number. BitmapSize uint32 = uint32(C.bitmap_size_c) ) @@ -51,6 +55,8 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) { locks.maxLocks = uint32(lockStruct.num_locks) locks.valid = true + logrus.Debugf("Initialized SHM lock manager at path %s", path) + return locks, nil } diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go index 974431a13..2c0ea611a 100644 --- a/libpod/lock/shm_lock_manager_linux.go +++ b/libpod/lock/shm_lock_manager_linux.go @@ -73,13 +73,17 @@ func (l *SHMLock) ID() uint32 { } // Lock acquires the lock. -func (l *SHMLock) Lock() error { - return l.manager.locks.LockSemaphore(l.lockID) +func (l *SHMLock) Lock() { + if err := l.manager.locks.LockSemaphore(l.lockID); err != nil { + panic(err.Error()) + } } // Unlock releases the lock. -func (l *SHMLock) Unlock() error { - return l.manager.locks.UnlockSemaphore(l.lockID) +func (l *SHMLock) Unlock() { + if err := l.manager.locks.UnlockSemaphore(l.lockID); err != nil { + panic(err.Error()) + } } // Free releases the lock, allowing it to be reused. diff --git a/libpod/runtime.go b/libpod/runtime.go index facbe5d66..238a7a9db 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -11,6 +11,7 @@ import ( is "github.com/containers/image/storage" "github.com/containers/image/types" "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/libpod/lock" "github.com/containers/libpod/pkg/firewall" sysreg "github.com/containers/libpod/pkg/registries" "github.com/containers/libpod/pkg/rootless" @@ -64,6 +65,11 @@ const ( // DefaultInitPath is the default path to the container-init binary DefaultInitPath = "/usr/libexec/podman/catatonit" + + // DefaultSHMLockPath is the default path for SHM locks + DefaultSHMLockPath = "/libpod_lock" + // DefaultRootlessSHMLockPath is the default path for rootless SHM locks + DefaultRootlessSHMLockPath = "/libpod_rootless_lock" ) // A RuntimeOption is a functional option which alters the Runtime created by @@ -86,6 +92,7 @@ type Runtime struct { lock sync.RWMutex imageRuntime *image.Runtime firewallBackend firewall.FirewallBackend + lockManager lock.Manager configuredFrom *runtimeConfiguredFrom } @@ -165,6 +172,7 @@ type RuntimeConfig struct { // and all containers and pods will be visible. // The default namespace is "". Namespace string `toml:"namespace,omitempty"` + // InfraImage is the image a pod infra container will use to manage namespaces InfraImage string `toml:"infra_image"` // InfraCommand is the command run to start up a pod infra container @@ -179,6 +187,10 @@ type RuntimeConfig struct { EnablePortReservation bool `toml:"enable_port_reservation"` // EnableLabeling indicates wether libpod will support container labeling EnableLabeling bool `toml:"label"` + + // NumLocks is the number of locks to make available for containers and + // pods. 
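+	// Each container and each pod holds one lock for its lifetime, so this
+	// also caps how many of them can exist at once.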
+ NumLocks uint32 `toml:"num_locks,omitempty"` } // runtimeConfiguredFrom is a struct used during early runtime init to help @@ -234,6 +246,7 @@ var ( InfraImage: DefaultInfraImage, EnablePortReservation: true, EnableLabeling: true, + NumLocks: 2048, } ) @@ -487,6 +500,56 @@ func makeRuntime(runtime *Runtime) (err error) { } } + // We now need to see if the system has restarted + // We check for the presence of a file in our tmp directory to verify this + // This check must be locked to prevent races + runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck") + runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive") + aliveLock, err := storage.GetLockfile(runtimeAliveLock) + if err != nil { + return errors.Wrapf(err, "error acquiring runtime init lock") + } + // Acquire the lock and hold it until we return + // This ensures that no two processes will be in runtime.refresh at once + // TODO: we can't close the FD in this lock, so we should keep it around + // and use it to lock important operations + aliveLock.Lock() + locked := true + doRefresh := false + defer func() { + if locked { + aliveLock.Unlock() + } + }() + _, err = os.Stat(runtimeAliveFile) + if err != nil { + // If the file doesn't exist, we need to refresh the state + // This will trigger on first use as well, but refreshing an + // empty state only creates a single file + // As such, it's not really a performance concern + if os.IsNotExist(err) { + doRefresh = true + } else { + return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile) + } + } + + // Set up the lock manager + var manager lock.Manager + lockPath := DefaultSHMLockPath + if rootless.IsRootless() { + lockPath = DefaultRootlessSHMLockPath + } + if doRefresh { + manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) + } else { + manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks) + } + if err != nil { + return errors.Wrapf(err, "error initializing SHM locking") + } + runtime.lockManager = manager + // Set up the state switch runtime.config.StateType { case InMemoryStateStore: @@ -656,46 +719,19 @@ func makeRuntime(runtime *Runtime) (err error) { } runtime.firewallBackend = fwBackend - // We now need to see if the system has restarted - // We check for the presence of a file in our tmp directory to verify this - // This check must be locked to prevent races - runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck") - runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive") - aliveLock, err := storage.GetLockfile(runtimeAliveLock) - if err != nil { - return errors.Wrapf(err, "error acquiring runtime init lock") - } - // Acquire the lock and hold it until we return - // This ensures that no two processes will be in runtime.refresh at once - // TODO: we can't close the FD in this lock, so we should keep it around - // and use it to lock important operations - aliveLock.Lock() - locked := true - defer func() { - if locked { + // If we need to refresh the state, do it now - things are guaranteed to + // be set up by now. 
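+	// doRefresh was computed earlier, before the lock manager existed, so a system restart both recreates the SHM segment via NewSHMLockManager above and drives the state refresh below.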
+ if doRefresh { + if os.Geteuid() != 0 { aliveLock.Unlock() - } - }() - _, err = os.Stat(runtimeAliveFile) - if err != nil { - // If the file doesn't exist, we need to refresh the state - // This will trigger on first use as well, but refreshing an - // empty state only creates a single file - // As such, it's not really a performance concern - if os.IsNotExist(err) { - if os.Geteuid() != 0 { - aliveLock.Unlock() - locked = false - if err2 := runtime.refreshRootless(); err2 != nil { - return err2 - } - } else { - if err2 := runtime.refresh(runtimeAliveFile); err2 != nil { - return err2 - } + locked = false + if err2 := runtime.refreshRootless(); err2 != nil { + return err2 } } else { - return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile) + if err2 := runtime.refresh(runtimeAliveFile); err2 != nil { + return err2 + } } } -- cgit v1.2.3-54-g00ecf From d4b2f116018e1d8e6a3c4f80f30db45934428c6b Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Fri, 24 Aug 2018 15:15:56 -0400 Subject: Convert pods to SHM locks Signed-off-by: Matthew Heon --- libpod/boltdb_state_internal.go | 5 ++--- libpod/container_internal.go | 3 +-- libpod/lock/shm/shm_lock_test.go | 11 +++++++---- libpod/pod.go | 7 +++++-- libpod/pod_easyjson.go | 12 ++++++++++++ libpod/pod_internal.go | 19 +++++++++---------- libpod/runtime_ctr.go | 6 +++++- libpod/runtime_pod_linux.go | 8 ++++++++ 8 files changed, 49 insertions(+), 22 deletions(-) (limited to 'libpod/lock/shm') diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 238bfa161..bffa83ffb 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -323,10 +323,9 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error } // Get the lock - lockPath := filepath.Join(s.runtime.lockDir, string(id)) - lock, err := storage.GetLockfile(lockPath) + lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID) if err != nil { - return errors.Wrapf(err, "error retrieving lockfile for pod %s", string(id)) + return errors.Wrapf(err, "error retrieving lock for pod %s", string(id)) } pod.lock = lock diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 856313b9d..cc4c36bc9 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -451,12 +451,11 @@ func (c *Container) refresh() error { } // We need to pick up a new lock - lock, err := c.runtime.lockManager.AllocateLock() + lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID) if err != nil { return errors.Wrapf(err, "error acquiring lock for container %s", c.ID()) } c.lock = lock - c.config.LockID = c.lock.ID() if err := c.save(); err != nil { return errors.Wrapf(err, "error refreshing state for container %s", c.ID()) diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go index 7174253d0..bdf0b19e5 100644 --- a/libpod/lock/shm/shm_lock_test.go +++ b/libpod/lock/shm/shm_lock_test.go @@ -18,7 +18,7 @@ import ( // We can at least verify that the locks work within the local process. 
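// As a rough sketch of the lifecycle these tests exercise (assuming the Deallocate and Close counterparts to the calls shown below): locks, err := CreateSHMLock(lockPath, numLocks); sem, err := locks.AllocateSemaphore(); err = locks.LockSemaphore(sem); err = locks.UnlockSemaphore(sem); err = locks.DeallocateSemaphore(sem); err = locks.Close().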
// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps -const numLocks uint32 = 4 * BitmapSize +var numLocks uint32 = 4 * BitmapSize const lockPath = "/libpod_test" @@ -155,7 +155,8 @@ func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) { func TestAllocateAllLocksSucceeds(t *testing.T) { runLockTest(t, func(t *testing.T, locks *SHMLocks) { sems := make(map[uint32]bool) - for i := 0; i < numLocks; i++ { + var i uint32 + for i = 0; i < numLocks; i++ { sem, err := locks.AllocateSemaphore() assert.NoError(t, err) @@ -172,7 +173,8 @@ func TestAllocateAllLocksSucceeds(t *testing.T) { func TestAllocateTooManyLocksFails(t *testing.T) { runLockTest(t, func(t *testing.T, locks *SHMLocks) { // Allocate all locks - for i := 0; i < numLocks; i++ { + var i uint32 + for i = 0; i < numLocks; i++ { _, err := locks.AllocateSemaphore() assert.NoError(t, err) } @@ -187,7 +189,8 @@ func TestAllocateTooManyLocksFails(t *testing.T) { func TestAllocateDeallocateCycle(t *testing.T) { runLockTest(t, func(t *testing.T, locks *SHMLocks) { // Allocate all locks - for i := 0; i < numLocks; i++ { + var i uint32 + for i = 0; i < numLocks; i++ { _, err := locks.AllocateSemaphore() assert.NoError(t, err) } diff --git a/libpod/pod.go b/libpod/pod.go index 07f41f5c6..4ce697402 100644 --- a/libpod/pod.go +++ b/libpod/pod.go @@ -3,7 +3,7 @@ package libpod import ( "time" - "github.com/containers/storage" + "github.com/containers/libpod/libpod/lock" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/pkg/errors" ) @@ -26,7 +26,7 @@ type Pod struct { valid bool runtime *Runtime - lock storage.Locker + lock lock.Locker } // PodConfig represents a pod's static configuration @@ -60,6 +60,9 @@ type PodConfig struct { // Time pod was created CreatedTime time.Time `json:"created"` + + // ID of the pod's lock + LockID uint32 `json:"lockID"` } // podState represents a pod's state diff --git a/libpod/pod_easyjson.go b/libpod/pod_easyjson.go index 8ea9a5e72..71862dad0 100644 --- a/libpod/pod_easyjson.go +++ b/libpod/pod_easyjson.go @@ -501,6 +501,8 @@ func easyjsonBe091417DecodeGithubComContainersLibpodLibpod4(in *jlexer.Lexer, ou if data := in.Raw(); in.Ok() { in.AddError((out.CreatedTime).UnmarshalJSON(data)) } + case "lockID": + out.LockID = uint32(in.Uint32()) default: in.SkipRecursive() } @@ -675,6 +677,16 @@ func easyjsonBe091417EncodeGithubComContainersLibpodLibpod4(out *jwriter.Writer, } out.Raw((in.CreatedTime).MarshalJSON()) } + { + const prefix string = ",\"lockID\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + out.Uint32(uint32(in.LockID)) + } out.RawByte('}') } diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go index 39a25c004..348dd2373 100644 --- a/libpod/pod_internal.go +++ b/libpod/pod_internal.go @@ -7,7 +7,6 @@ import ( "strings" "time" - "github.com/containers/storage" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -24,15 +23,6 @@ func newPod(lockDir string, runtime *Runtime) (*Pod, error) { pod.state = new(podState) pod.runtime = runtime - // Path our lock file will reside at - lockPath := filepath.Join(lockDir, pod.config.ID) - // Grab a lockfile at the given path - lock, err := storage.GetLockfile(lockPath) - if err != nil { - return nil, errors.Wrapf(err, "error creating lockfile for new pod") - } - pod.lock = lock - return pod, nil } @@ -55,6 +45,8 @@ func (p *Pod) save() error { } // Refresh a pod's state after restart +// This cannot lock any other pod, but may lock individual containers, as 
those +// will have refreshed by the time pod refresh runs. func (p *Pod) refresh() error { // Need to to an update from the DB to pull potentially-missing state if err := p.runtime.state.UpdatePod(p); err != nil { @@ -65,6 +57,13 @@ func (p *Pod) refresh() error { return ErrPodRemoved } + // Retrieve the pod's lock + lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID) + if err != nil { + return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID()) + } + p.lock = lock + // We need to recreate the pod's cgroup if p.config.UsePodCgroup { switch p.runtime.config.CgroupManager { diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index eb78e7d7d..5b8a7f759 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -81,7 +81,11 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options .. return nil, errors.Wrapf(err, "error allocating lock for new container") } ctr.lock = lock - ctr.config.LockID = c.lock.ID() + ctr.config.LockID = ctr.lock.ID() + + ctr.valid = true + ctr.state.State = ContainerStateConfigured + ctr.runtime = r ctr.valid = true ctr.state.State = ContainerStateConfigured diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go index 95246449a..529c516c8 100644 --- a/libpod/runtime_pod_linux.go +++ b/libpod/runtime_pod_linux.go @@ -48,6 +48,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, pod.config.Name = name } + // Allocate a lock for the pod + lock, err := r.lockManager.AllocateLock() + if err != nil { + return nil, errors.Wrapf(err, "error allocating lock for new pod") + } + pod.lock = lock + pod.config.LockID = pod.lock.ID() + pod.valid = true // Check CGroup parent sanity, and set it if it was not set -- cgit v1.2.3-54-g00ecf From 625c7e18ef2b7f47a853c42f1a07fec730dbc91e Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Sun, 23 Sep 2018 14:02:36 -0400 Subject: Update unit tests to use in-memory lock manager Signed-off-by: Matthew Heon --- libpod/common_test.go | 46 +- libpod/container_graph_test.go | 160 +++---- libpod/lock/in_memory_locks.go | 2 +- libpod/lock/shm/shm_lock_test.go | 6 +- libpod/state_test.go | 908 ++++++++++++++++++++------------------- 5 files changed, 573 insertions(+), 549 deletions(-) (limited to 'libpod/lock/shm') diff --git a/libpod/common_test.go b/libpod/common_test.go index 6c7434fd2..882468a3a 100644 --- a/libpod/common_test.go +++ b/libpod/common_test.go @@ -3,20 +3,19 @@ package libpod import ( "encoding/json" "net" - "path/filepath" "reflect" "strings" "testing" "time" - "github.com/containers/storage" + "github.com/containers/libpod/libpod/lock" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/opencontainers/runtime-tools/generate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func getTestContainer(id, name, locksDir string) (*Container, error) { +func getTestContainer(id, name string, manager lock.Manager) (*Container, error) { ctr := &Container{ config: &Config{ ID: id, @@ -90,18 +89,18 @@ func getTestContainer(id, name, locksDir string) (*Container, error) { ctr.config.Labels["test"] = "testing" - // Must make lockfile or container will error on being retrieved from DB - lockPath := filepath.Join(locksDir, id) - lock, err := storage.GetLockfile(lockPath) + // Allocate a lock for the container + lock, err := manager.AllocateLock() if err != nil { return nil, err } ctr.lock = lock + ctr.config.LockID = lock.ID() return ctr, nil } -func getTestPod(id, name, locksDir string) (*Pod, error) { +func 
getTestPod(id, name string, manager lock.Manager) (*Pod, error) { pod := &Pod{ config: &PodConfig{ ID: id, @@ -115,38 +114,39 @@ func getTestPod(id, name, locksDir string) (*Pod, error) { valid: true, } - lockPath := filepath.Join(locksDir, id) - lock, err := storage.GetLockfile(lockPath) + // Allocate a lock for the pod + lock, err := manager.AllocateLock() if err != nil { return nil, err } pod.lock = lock + pod.config.LockID = lock.ID() return pod, nil } -func getTestCtrN(n, lockPath string) (*Container, error) { - return getTestContainer(strings.Repeat(n, 32), "test"+n, lockPath) +func getTestCtrN(n string, manager lock.Manager) (*Container, error) { + return getTestContainer(strings.Repeat(n, 32), "test"+n, manager) } -func getTestCtr1(lockPath string) (*Container, error) { - return getTestCtrN("1", lockPath) +func getTestCtr1(manager lock.Manager) (*Container, error) { + return getTestCtrN("1", manager) } -func getTestCtr2(lockPath string) (*Container, error) { - return getTestCtrN("2", lockPath) +func getTestCtr2(manager lock.Manager) (*Container, error) { + return getTestCtrN("2", manager) } -func getTestPodN(n, lockPath string) (*Pod, error) { - return getTestPod(strings.Repeat(n, 32), "test"+n, lockPath) +func getTestPodN(n string, manager lock.Manager) (*Pod, error) { + return getTestPod(strings.Repeat(n, 32), "test"+n, manager) } -func getTestPod1(lockPath string) (*Pod, error) { - return getTestPodN("1", lockPath) +func getTestPod1(manager lock.Manager) (*Pod, error) { + return getTestPodN("1", manager) } -func getTestPod2(lockPath string) (*Pod, error) { - return getTestPodN("2", lockPath) +func getTestPod2(manager lock.Manager) (*Pod, error) { + return getTestPodN("2", manager) } // This horrible hack tests if containers are equal in a way that should handle @@ -174,6 +174,8 @@ func testContainersEqual(t *testing.T, a, b *Container, allowedEmpty bool) { assert.Equal(t, a.valid, b.valid) + assert.Equal(t, a.lock.ID(), b.lock.ID()) + aConfigJSON, err := json.Marshal(a.config) assert.NoError(t, err) err = json.Unmarshal(aConfigJSON, aConfig) @@ -223,6 +225,8 @@ func testPodsEqual(t *testing.T, a, b *Pod, allowedEmpty bool) { assert.Equal(t, a.valid, b.valid) + assert.Equal(t, a.lock.ID(), b.lock.ID()) + assert.EqualValues(t, a.config, b.config) if allowedEmpty { diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go index 25461f1f4..d1a52658d 100644 --- a/libpod/container_graph_test.go +++ b/libpod/container_graph_test.go @@ -1,10 +1,9 @@ package libpod import ( - "io/ioutil" - "os" "testing" + "github.com/containers/libpod/libpod/lock" "github.com/stretchr/testify/assert" ) @@ -17,11 +16,12 @@ func TestBuildContainerGraphNoCtrsIsEmpty(t *testing.T) { } func TestBuildContainerGraphOneCtr(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) graph, err := buildContainerGraph([]*Container{ctr1}) @@ -39,13 +39,14 @@ func TestBuildContainerGraphOneCtr(t *testing.T) { } func TestBuildContainerGraphTwoCtrNoEdge(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := 
getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) graph, err := buildContainerGraph([]*Container{ctr1, ctr2}) @@ -64,13 +65,14 @@ func TestBuildContainerGraphTwoCtrNoEdge(t *testing.T) { } func TestBuildContainerGraphTwoCtrOneEdge(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) ctr2.config.UserNsCtr = ctr1.config.ID @@ -85,13 +87,14 @@ func TestBuildContainerGraphTwoCtrOneEdge(t *testing.T) { } func TestBuildContainerGraphTwoCtrCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) ctr2.config.UserNsCtr = ctr1.config.ID ctr1.config.NetNsCtr = ctr2.config.ID @@ -101,15 +104,16 @@ func TestBuildContainerGraphTwoCtrCycle(t *testing.T) { } func TestBuildContainerGraphThreeCtrNoEdges(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3}) @@ -132,15 +136,16 @@ func TestBuildContainerGraphThreeCtrNoEdges(t *testing.T) { } func TestBuildContainerGraphThreeContainersTwoInCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) ctr1.config.UserNsCtr = ctr2.config.ID ctr2.config.IPCNsCtr = ctr1.config.ID @@ -150,15 +155,16 @@ func TestBuildContainerGraphThreeContainersTwoInCycle(t *testing.T) { } func TestBuildContainerGraphThreeContainersCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) ctr1.config.UserNsCtr = 
ctr2.config.ID ctr2.config.IPCNsCtr = ctr3.config.ID @@ -169,15 +175,16 @@ func TestBuildContainerGraphThreeContainersCycle(t *testing.T) { } func TestBuildContainerGraphThreeContainersNoCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) ctr1.config.UserNsCtr = ctr2.config.ID ctr1.config.NetNsCtr = ctr3.config.ID @@ -194,17 +201,18 @@ func TestBuildContainerGraphThreeContainersNoCycle(t *testing.T) { } func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) - ctr4, err := getTestCtrN("4", tmpDir) + ctr4, err := getTestCtrN("4", manager) assert.NoError(t, err) graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) @@ -231,18 +239,20 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) { } func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) - ctr4, err := getTestCtrN("4", tmpDir) + ctr4, err := getTestCtrN("4", manager) assert.NoError(t, err) + ctr1.config.IPCNsCtr = ctr2.config.ID ctr2.config.UserNsCtr = ctr1.config.ID @@ -251,18 +261,20 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) { } func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) { - tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) - ctr4, err := getTestCtrN("4", tmpDir) + ctr4, err := getTestCtrN("4", manager) assert.NoError(t, err) + ctr1.config.IPCNsCtr = ctr2.config.ID ctr2.config.UserNsCtr = ctr3.config.ID ctr3.config.NetNsCtr = ctr4.config.ID @@ -273,18 +285,20 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) { } func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) { - 
tmpDir, err := ioutil.TempDir("", tmpDirPrefix) - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) + manager, err := lock.NewInMemoryManager(16) + if err != nil { + t.Fatalf("Error setting up locks: %v", err) + } - ctr1, err := getTestCtr1(tmpDir) + ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - ctr2, err := getTestCtr2(tmpDir) + ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - ctr3, err := getTestCtrN("3", tmpDir) + ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) - ctr4, err := getTestCtrN("4", tmpDir) + ctr4, err := getTestCtrN("4", manager) assert.NoError(t, err) + ctr1.config.IPCNsCtr = ctr2.config.ID ctr1.config.NetNsCtr = ctr3.config.ID ctr2.config.UserNsCtr = ctr3.config.ID diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go index 1df0d2b61..db8f20e95 100644 --- a/libpod/lock/in_memory_locks.go +++ b/libpod/lock/in_memory_locks.go @@ -84,7 +84,7 @@ func (m *InMemoryManager) AllocateLock() (Locker, error) { // RetrieveLock retrieves a lock from the manager. func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) { if id >= m.numLocks { - return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks - 1) + return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1) } return m.locks[id], nil diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go index bdf0b19e5..0f3a96cca 100644 --- a/libpod/lock/shm/shm_lock_test.go +++ b/libpod/lock/shm/shm_lock_test.go @@ -17,8 +17,10 @@ import ( // to test without actually having multiple processes... // We can at least verify that the locks work within the local process. -// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps -var numLocks uint32 = 4 * BitmapSize +var ( + // 4 * BITMAP_SIZE to ensure we have to traverse bitmaps + numLocks = 4 * BitmapSize +) const lockPath = "/libpod_test" diff --git a/libpod/state_test.go b/libpod/state_test.go index 708ce7d4e..ee4201b1c 100644 --- a/libpod/state_test.go +++ b/libpod/state_test.go @@ -8,16 +8,17 @@ import ( "testing" "time" + "github.com/containers/libpod/libpod/lock" "github.com/containers/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// Returns state, tmp directory containing all state files, locks directory -// (subdirectory of tmp dir), and error +// Returns state, tmp directory containing all state files, lock manager, and +// error. // Closing the state and removing the given tmp directory should be sufficient -// to clean up -type emptyStateFunc func() (State, string, string, error) +// to clean up. 
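+// The returned lock manager is what the tests use to allocate locks for the containers and pods they create (for the BoltDB state it is also attached to the state's runtime).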
+type emptyStateFunc func() (State, string, lock.Manager, error) const ( tmpDirPrefix = "libpod_state_test_" @@ -31,10 +32,10 @@ var ( ) // Get an empty BoltDB state for use in tests -func getEmptyBoltState() (s State, p string, p2 string, err error) { +func getEmptyBoltState() (s State, p string, m lock.Manager, err error) { tmpDir, err := ioutil.TempDir("", tmpDirPrefix) if err != nil { - return nil, "", "", err + return nil, "", nil, err } defer func() { if err != nil { @@ -43,30 +44,30 @@ func getEmptyBoltState() (s State, p string, p2 string, err error) { }() dbPath := filepath.Join(tmpDir, "db.sql") - lockDir := filepath.Join(tmpDir, "locks") - if err := os.Mkdir(lockDir, 0755); err != nil { - return nil, "", "", err + lockManager, err := lock.NewInMemoryManager(16) + if err != nil { + return nil, "", nil, err } runtime := new(Runtime) runtime.config = new(RuntimeConfig) runtime.config.StorageConfig = storage.StoreOptions{} - runtime.lockDir = lockDir + runtime.lockManager = lockManager state, err := NewBoltState(dbPath, runtime) if err != nil { - return nil, "", "", err + return nil, "", nil, err } - return state, tmpDir, lockDir, nil + return state, tmpDir, lockManager, nil } // Get an empty in-memory state for use in tests -func getEmptyInMemoryState() (s State, p string, p2 string, err error) { +func getEmptyInMemoryState() (s State, p string, m lock.Manager, err error) { tmpDir, err := ioutil.TempDir("", tmpDirPrefix) if err != nil { - return nil, "", "", err + return nil, "", nil, err } defer func() { if err != nil { @@ -76,17 +77,20 @@ func getEmptyInMemoryState() (s State, p string, p2 string, err error) { state, err := NewInMemoryState() if err != nil { - return nil, "", "", err + return nil, "", nil, err + } + + lockManager, err := lock.NewInMemoryManager(16) + if err != nil { + return nil, "", nil, err } - // Don't need a separate locks dir as InMemoryState stores nothing on - // disk - return state, tmpDir, tmpDir, nil + return state, tmpDir, lockManager, nil } -func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) { +func runForAllStates(t *testing.T, testFunc func(*testing.T, State, lock.Manager)) { for stateName, stateFunc := range testedStates { - state, path, lockPath, err := stateFunc() + state, path, manager, err := stateFunc() if err != nil { t.Fatalf("Error initializing state %s: %v", stateName, err) } @@ -94,7 +98,7 @@ func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) { defer state.Close() success := t.Run(stateName, func(t *testing.T) { - testFunc(t, state, lockPath) + testFunc(t, state, manager) }) if !success { t.Fail() @@ -103,8 +107,8 @@ func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) { } func TestAddAndGetContainer(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -118,10 +122,10 @@ func TestAddAndGetContainer(t *testing.T) { } func TestAddAndGetContainerFromMultiple(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestCtr2(lockPath) + testCtr2, err := getTestCtr2(manager) assert.NoError(t, 
err) err = state.AddContainer(testCtr1) @@ -138,8 +142,8 @@ func TestAddAndGetContainerFromMultiple(t *testing.T) { } func TestGetContainerPodSameIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -151,17 +155,17 @@ func TestGetContainerPodSameIDFails(t *testing.T) { } func TestAddInvalidContainerFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { err := state.AddContainer(&Container{config: &Config{ID: "1234"}}) assert.Error(t, err) }) } func TestAddDuplicateCtrIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestContainer(testCtr1.ID(), "test2", lockPath) + testCtr2, err := getTestContainer(testCtr1.ID(), "test2", manager) assert.NoError(t, err) err = state.AddContainer(testCtr1) @@ -177,10 +181,10 @@ func TestAddDuplicateCtrIDFails(t *testing.T) { } func TestAddDuplicateCtrNameFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestContainer(strings.Repeat("2", 32), testCtr1.Name(), lockPath) + testCtr2, err := getTestContainer(strings.Repeat("2", 32), testCtr1.Name(), manager) assert.NoError(t, err) err = state.AddContainer(testCtr1) @@ -196,10 +200,10 @@ func TestAddDuplicateCtrNameFails(t *testing.T) { } func TestAddCtrPodDupIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) - testCtr, err := getTestContainer(testPod.ID(), "testCtr", lockPath) + testCtr, err := getTestContainer(testPod.ID(), "testCtr", manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -215,10 +219,10 @@ func TestAddCtrPodDupIDFails(t *testing.T) { } func TestAddCtrPodDupNameFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) - testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), lockPath) + testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -234,11 +238,11 @@ func TestAddCtrPodDupNameFails(t *testing.T) { } func TestAddCtrInPodFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) - testCtr, err := getTestCtr2(lockPath) + testCtr, err := getTestCtr2(manager) assert.NoError(t, err) 
testCtr.config.Pod = testPod.ID() @@ -256,16 +260,16 @@ func TestAddCtrInPodFails(t *testing.T) { } func TestAddCtrDepInPodFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) - testCtr1, err := getTestCtr2(lockPath) + testCtr1, err := getTestCtr2(manager) assert.NoError(t, err) testCtr1.config.Pod = testPod.ID() - testCtr2, err := getTestCtrN("3", lockPath) + testCtr2, err := getTestCtrN("3", manager) assert.NoError(t, err) testCtr2.config.UserNsCtr = testCtr1.ID() @@ -288,10 +292,10 @@ func TestAddCtrDepInPodFails(t *testing.T) { } func TestAddCtrDepInSameNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestCtr2(lockPath) + testCtr2, err := getTestCtr2(manager) assert.NoError(t, err) testCtr2.config.UserNsCtr = testCtr1.config.ID @@ -312,10 +316,10 @@ func TestAddCtrDepInSameNamespaceSucceeds(t *testing.T) { } func TestAddCtrDepInDifferentNamespaceFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestCtr2(lockPath) + testCtr2, err := getTestCtr2(manager) assert.NoError(t, err) testCtr2.config.UserNsCtr = testCtr1.config.ID @@ -338,8 +342,8 @@ func TestAddCtrDepInDifferentNamespaceFails(t *testing.T) { } func TestAddCtrSameNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -357,8 +361,8 @@ func TestAddCtrSameNamespaceSucceeds(t *testing.T) { } func TestAddCtrDifferentNamespaceFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -377,22 +381,22 @@ func TestAddCtrDifferentNamespaceFails(t *testing.T) { } func TestGetNonexistentContainerFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { _, err := state.Container("does not exist") assert.Error(t, err) }) } func TestGetContainerWithEmptyIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { _, err := state.Container("") assert.Error(t, err) }) } func TestGetContainerInDifferentNamespaceFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace 
= "test2" @@ -408,8 +412,8 @@ func TestGetContainerInDifferentNamespaceFails(t *testing.T) { } func TestGetContainerInSameNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -427,8 +431,8 @@ func TestGetContainerInSameNamespaceSucceeds(t *testing.T) { } func TestGetContainerInNamespaceWhileNotInNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -444,22 +448,22 @@ func TestGetContainerInNamespaceWhileNotInNamespaceSucceeds(t *testing.T) { } func TestLookupContainerWithEmptyIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { _, err := state.LookupContainer("") assert.Error(t, err) }) } func TestLookupNonexistentContainerFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { _, err := state.LookupContainer("does not exist") assert.Error(t, err) }) } func TestLookupContainerByFullID(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -473,8 +477,8 @@ func TestLookupContainerByFullID(t *testing.T) { } func TestLookupContainerByUniquePartialID(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -488,10 +492,10 @@ func TestLookupContainerByUniquePartialID(t *testing.T) { } func TestLookupContainerByNonUniquePartialIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", manager) assert.NoError(t, err) - testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", lockPath) + testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", manager) assert.NoError(t, err) err = state.AddContainer(testCtr1) @@ -506,8 +510,8 @@ func TestLookupContainerByNonUniquePartialIDFails(t *testing.T) { } func TestLookupContainerByName(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -521,8 +525,8 @@ func TestLookupContainerByName(t *testing.T) { } func TestLookupCtrByPodNameFails(t *testing.T) { - runForAllStates(t, func(t 
*testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -534,8 +538,8 @@ func TestLookupCtrByPodNameFails(t *testing.T) { } func TestLookupCtrByPodIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -547,8 +551,8 @@ func TestLookupCtrByPodIDFails(t *testing.T) { } func TestLookupCtrInSameNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -566,8 +570,8 @@ func TestLookupCtrInSameNamespaceSucceeds(t *testing.T) { } func TestLookupCtrInDifferentNamespaceFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -583,11 +587,11 @@ func TestLookupCtrInDifferentNamespaceFails(t *testing.T) { } func TestLookupContainerMatchInDifferentNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", manager) assert.NoError(t, err) testCtr1.config.Namespace = "test2" - testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", lockPath) + testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", manager) assert.NoError(t, err) testCtr2.config.Namespace = "test1" @@ -607,14 +611,14 @@ func TestLookupContainerMatchInDifferentNamespaceSucceeds(t *testing.T) { } func TestHasContainerEmptyIDFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { _, err := state.HasContainer("") assert.Error(t, err) }) } func TestHasContainerNoSuchContainerReturnsFalse(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { exists, err := state.HasContainer("does not exist") assert.NoError(t, err) assert.False(t, exists) @@ -622,8 +626,8 @@ func TestHasContainerNoSuchContainerReturnsFalse(t *testing.T) { } func TestHasContainerFindsContainer(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -636,8 +640,8 @@ func TestHasContainerFindsContainer(t *testing.T) { } func TestHasContainerPodIDIsFalse(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testPod, err := 
getTestPod1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testPod, err := getTestPod1(manager) assert.NoError(t, err) err = state.AddPod(testPod) @@ -650,8 +654,8 @@ func TestHasContainerPodIDIsFalse(t *testing.T) { } func TestHasContainerSameNamespaceIsTrue(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -668,8 +672,8 @@ func TestHasContainerSameNamespaceIsTrue(t *testing.T) { } func TestHasContainerDifferentNamespaceIsFalse(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -686,8 +690,8 @@ func TestHasContainerDifferentNamespaceIsFalse(t *testing.T) { } func TestSaveAndUpdateContainer(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -711,8 +715,8 @@ func TestSaveAndUpdateContainer(t *testing.T) { } func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -740,8 +744,8 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) { } func TestUpdateContainerNotInDatabaseReturnsError(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.UpdateContainer(testCtr) @@ -751,15 +755,15 @@ func TestUpdateContainerNotInDatabaseReturnsError(t *testing.T) { } func TestUpdateInvalidContainerReturnsError(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { err := state.UpdateContainer(&Container{config: &Config{ID: "1234"}}) assert.Error(t, err) }) } func TestUpdateContainerNotInNamespaceReturnsError(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -775,15 +779,15 @@ func TestUpdateContainerNotInNamespaceReturnsError(t *testing.T) { } func TestSaveInvalidContainerReturnsError(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { err := state.SaveContainer(&Container{config: &Config{ID: "1234"}}) assert.Error(t, err) }) } func TestSaveContainerNotInStateReturnsError(t *testing.T) { - runForAllStates(t, func(t 
*testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.SaveContainer(testCtr) @@ -793,8 +797,8 @@ func TestSaveContainerNotInStateReturnsError(t *testing.T) { } func TestSaveContainerNotInNamespaceReturnsError(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -810,8 +814,8 @@ func TestSaveContainerNotInNamespaceReturnsError(t *testing.T) { } func TestRemoveContainer(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -831,8 +835,8 @@ func TestRemoveContainer(t *testing.T) { } func TestRemoveNonexistantContainerFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.RemoveContainer(testCtr) @@ -842,8 +846,8 @@ func TestRemoveNonexistantContainerFails(t *testing.T) { } func TestRemoveContainerNotInNamespaceFails(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) testCtr.config.Namespace = "test1" @@ -869,7 +873,7 @@ func TestRemoveContainerNotInNamespaceFails(t *testing.T) { } func TestGetAllContainersOnNewStateIsEmpty(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { ctrs, err := state.AllContainers() assert.NoError(t, err) assert.Equal(t, 0, len(ctrs)) @@ -877,8 +881,8 @@ func TestGetAllContainersOnNewStateIsEmpty(t *testing.T) { } func TestGetAllContainersWithOneContainer(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr, err := getTestCtr1(manager) assert.NoError(t, err) err = state.AddContainer(testCtr) @@ -893,10 +897,10 @@ func TestGetAllContainersWithOneContainer(t *testing.T) { } func TestGetAllContainersTwoContainers(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr1, err := getTestCtr1(lockPath) + runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) { + testCtr1, err := getTestCtr1(manager) assert.NoError(t, err) - testCtr2, err := getTestCtr2(lockPath) + testCtr2, err := getTestCtr2(manager) assert.NoError(t, err) err = state.AddContainer(testCtr1) @@ -912,8 +916,8 @@ func TestGetAllContainersTwoContainers(t *testing.T) { } func TestGetAllContainersNoContainerInNamespace(t *testing.T) { - runForAllStates(t, func(t *testing.T, state State, lockPath string) { - testCtr, err := getTestCtr1(lockPath) + runForAllStates(t, func(t 
*testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
@@ -930,13 +934,13 @@ func TestGetAllContainersNoContainerInNamespace(t *testing.T) {
 }
 func TestGetContainerOneContainerInNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Namespace = "test1"
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr1)
@@ -956,15 +960,15 @@ func TestGetContainerOneContainerInNamespace(t *testing.T) {
 }
 func TestContainerInUseInvalidContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.ContainerInUse(&Container{})
 		assert.Error(t, err)
 	})
 }
 func TestContainerInUseCtrNotInState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		_, err = state.ContainerInUse(testCtr)
 		assert.Error(t, err)
@@ -972,8 +976,8 @@ func TestContainerInUseCtrNotInState(t *testing.T) {
 }
 func TestContainerInUseCtrNotInNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
@@ -989,10 +993,10 @@ func TestContainerInUseCtrNotInNamespace(t *testing.T) {
 }
 func TestContainerInUseOneContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1011,12 +1015,12 @@ func TestContainerInUseOneContainer(t *testing.T) {
 }
 func TestContainerInUseTwoContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
-		testCtr3, err := getTestCtrN("3", lockPath)
+		testCtr3, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1038,10 +1042,10 @@ func TestContainerInUseTwoContainers(t *testing.T) {
 }
 func TestContainerInUseOneContainerMultipleDependencies(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1061,10 +1065,10 @@ func TestContainerInUseOneContainerMultipleDependencies(t *testing.T) {
 }
 func TestContainerInUseGenericDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1083,12 +1087,12 @@ func TestContainerInUseGenericDependency(t *testing.T) {
 }
 func TestContainerInUseMultipleGenericDependencies(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
-		testCtr3, err := getTestCtrN("3", lockPath)
+		testCtr3, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr3.config.Dependencies = []string{testCtr1.config.ID, testCtr2.config.ID}
@@ -1115,10 +1119,10 @@ func TestContainerInUseMultipleGenericDependencies(t *testing.T) {
 }
 func TestContainerInUseGenericAndNamespaceDependencies(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1138,10 +1142,10 @@ func TestContainerInUseGenericAndNamespaceDependencies(t *testing.T) {
 }
 func TestCannotRemoveContainerWithDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1162,10 +1166,10 @@ func TestCannotRemoveContainerWithDependency(t *testing.T) {
 }
 func TestCannotRemoveContainerWithGenericDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1186,10 +1190,10 @@ func TestCannotRemoveContainerWithGenericDependency(t *testing.T) {
 }
 func TestCanRemoveContainerAfterDependencyRemoved(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.ID()
@@ -1213,10 +1217,10 @@ func TestCanRemoveContainerAfterDependencyRemoved(t *testing.T) {
 }
 func TestCanRemoveContainerAfterDependencyRemovedDuplicate(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr1, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr1, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtr2(lockPath)
+		testCtr2, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr2.config.UserNsCtr = testCtr1.ID()
@@ -1241,11 +1245,11 @@ func TestCanRemoveContainerAfterDependencyRemovedDuplicate(t *testing.T) {
 }
 func TestCannotUsePodAsDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testPod, err := getTestPod2(lockPath)
+		testPod, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		testCtr.config.UserNsCtr = testPod.ID()
@@ -1263,8 +1267,8 @@ func TestCannotUsePodAsDependency(t *testing.T) {
 }
 func TestCannotUseBadIDAsDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		testCtr.config.UserNsCtr = strings.Repeat("5", 32)
@@ -1279,8 +1283,8 @@ func TestCannotUseBadIDAsDependency(t *testing.T) {
 }
 func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		testCtr.config.Dependencies = []string{strings.Repeat("5", 32)}
@@ -1295,22 +1299,22 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
 }
 func TestGetPodDoesNotExist(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.Pod("doesnotexist")
 		assert.Error(t, err)
 	})
 }
 func TestGetPodEmptyID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.Pod("")
 		assert.Error(t, err)
 	})
 }
 func TestGetPodOnePod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1324,11 +1328,11 @@ func TestGetPodOnePod(t *testing.T) {
 }
 func TestGetOnePodFromTwo(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1345,11 +1349,11 @@ func TestGetOnePodFromTwo(t *testing.T) {
 }
 func TestGetNotExistPodWithPods(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1364,8 +1368,8 @@ func TestGetNotExistPodWithPods(t *testing.T) {
 }
 func TestGetPodByCtrID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1377,8 +1381,8 @@ func TestGetPodByCtrID(t *testing.T) {
 }
 func TestGetPodInNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1396,8 +1400,8 @@ func TestGetPodInNamespaceSucceeds(t *testing.T) {
 }
 func TestGetPodPodNotInNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1413,22 +1417,22 @@ func TestGetPodPodNotInNamespaceFails(t *testing.T) {
 }
 func TestLookupPodEmptyID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.LookupPod("")
 		assert.Error(t, err)
 	})
 }
 func TestLookupNotExistPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.LookupPod("doesnotexist")
 		assert.Error(t, err)
 	})
 }
 func TestLookupPodFullID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1442,8 +1446,8 @@ func TestLookupPodFullID(t *testing.T) {
 }
 func TestLookupPodUniquePartialID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1457,11 +1461,11 @@ func TestLookupPodUniquePartialID(t *testing.T) {
 }
 func TestLookupPodNonUniquePartialID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", lockPath)
+		testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1476,8 +1480,8 @@ func TestLookupPodNonUniquePartialID(t *testing.T) {
 }
 func TestLookupPodByName(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1491,8 +1495,8 @@ func TestLookupPodByName(t *testing.T) {
 }
 func TestLookupPodByCtrID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1504,8 +1508,8 @@ func TestLookupPodByCtrID(t *testing.T) {
 }
 func TestLookupPodByCtrName(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1517,8 +1521,8 @@ func TestLookupPodByCtrName(t *testing.T) {
 }
 func TestLookupPodInSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1536,8 +1540,8 @@ func TestLookupPodInSameNamespaceSucceeds(t *testing.T) {
 }
 func TestLookupPodInDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1553,13 +1557,13 @@ func TestLookupPodInDifferentNamespaceFails(t *testing.T) {
 }
 func TestLookupPodOneInDifferentNamespaceFindsRightPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", manager)
 		assert.NoError(t, err)
 		testPod1.config.Namespace = "test1"
-		testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", lockPath)
+		testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", manager)
 		assert.NoError(t, err)
 		testPod2.config.Namespace = "test2"
@@ -1580,14 +1584,14 @@ func TestLookupPodOneInDifferentNamespaceFindsRightPod(t *testing.T) {
 }
 func TestHasPodEmptyIDErrors(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.HasPod("")
 		assert.Error(t, err)
 	})
 }
 func TestHasPodNoSuchPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		exist, err := state.HasPod("notexist")
 		assert.NoError(t, err)
 		assert.False(t, exist)
@@ -1595,8 +1599,8 @@ func TestHasPodNoSuchPod(t *testing.T) {
 }
 func TestHasPodWrongIDFalse(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1609,8 +1613,8 @@ func TestHasPodWrongIDFalse(t *testing.T) {
 }
 func TestHasPodRightIDTrue(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1623,8 +1627,8 @@ func TestHasPodRightIDTrue(t *testing.T) {
 }
 func TestHasPodCtrIDFalse(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1637,8 +1641,8 @@ func TestHasPodCtrIDFalse(t *testing.T) {
 }
 func TestHasPodSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1655,8 +1659,8 @@ func TestHasPodSameNamespaceSucceeds(t *testing.T) {
 }
 func TestHasPodDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1673,15 +1677,15 @@ func TestHasPodDifferentNamespaceFails(t *testing.T) {
 }
 func TestAddPodInvalidPodErrors(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		err := state.AddPod(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestAddPodValidPodSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1696,11 +1700,11 @@ func TestAddPodValidPodSucceeds(t *testing.T) {
 }
 func TestAddPodDuplicateIDFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod(testPod1.ID(), "testpod2", lockPath)
+		testPod2, err := getTestPod(testPod1.ID(), "testpod2", manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1716,11 +1720,11 @@ func TestAddPodDuplicateIDFails(t *testing.T) {
 }
 func TestAddPodDuplicateNameFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod(strings.Repeat("2", 32), testPod1.Name(), lockPath)
+		testPod2, err := getTestPod(strings.Repeat("2", 32), testPod1.Name(), manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1736,11 +1740,11 @@ func TestAddPodDuplicateNameFails(t *testing.T) {
 }
 func TestAddPodNonDuplicateSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1756,11 +1760,11 @@ func TestAddPodNonDuplicateSucceeds(t *testing.T) {
 }
 func TestAddPodCtrIDConflictFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testPod, err := getTestPod(testCtr.ID(), "testpod1", lockPath)
+		testPod, err := getTestPod(testCtr.ID(), "testpod1", manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1776,11 +1780,11 @@ func TestAddPodCtrIDConflictFails(t *testing.T) {
 }
 func TestAddPodCtrNameConflictFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testPod, err := getTestPod(strings.Repeat("3", 32), testCtr.Name(), lockPath)
+		testPod, err := getTestPod(strings.Repeat("3", 32), testCtr.Name(), manager)
 		assert.NoError(t, err)
 		err = state.AddContainer(testCtr)
@@ -1796,8 +1800,8 @@ func TestAddPodCtrNameConflictFails(t *testing.T) {
 }
 func TestAddPodSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1816,8 +1820,8 @@ func TestAddPodSameNamespaceSucceeds(t *testing.T) {
 }
 func TestAddPodDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1836,15 +1840,15 @@ func TestAddPodDifferentNamespaceFails(t *testing.T) {
 }
 func TestRemovePodInvalidPodErrors(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		err := state.RemovePod(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestRemovePodNotInStateFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.RemovePod(testPod)
@@ -1854,8 +1858,8 @@ func TestRemovePodNotInStateFails(t *testing.T) {
 }
 func TestRemovePodSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1871,11 +1875,11 @@ func TestRemovePodSucceeds(t *testing.T) {
 }
 func TestRemovePodFromPods(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod1)
@@ -1896,11 +1900,11 @@ func TestRemovePodFromPods(t *testing.T) {
 }
 func TestRemovePodNotEmptyFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -1920,11 +1924,11 @@ func TestRemovePodNotEmptyFails(t *testing.T) {
 }
 func TestRemovePodAfterEmptySucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -1947,8 +1951,8 @@ func TestRemovePodAfterEmptySucceeds(t *testing.T) {
 }
 func TestRemovePodNotInNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -1970,7 +1974,7 @@ func TestRemovePodNotInNamespaceFails(t *testing.T) {
 }
 func TestAllPodsEmptyOnEmptyState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		allPods, err := state.AllPods()
 		assert.NoError(t, err)
 		assert.Equal(t, 0, len(allPods))
@@ -1978,8 +1982,8 @@ func TestAllPodsEmptyOnEmptyState(t *testing.T) {
 }
 func TestAllPodsFindsPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -1994,14 +1998,14 @@ func TestAllPodsFindsPod(t *testing.T) {
 }
 func TestAllPodsMultiplePods(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
-		testPod3, err := getTestPodN("3", lockPath)
+		testPod3, err := getTestPodN("3", manager)
 		assert.NoError(t, err)
 		allPods1, err := state.AllPods()
@@ -2032,8 +2036,8 @@ func TestAllPodsMultiplePods(t *testing.T) {
 }
 func TestAllPodsPodInDifferentNamespaces(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -2050,13 +2054,13 @@ func TestAllPodsPodInDifferentNamespaces(t *testing.T) {
 }
 func TestAllPodsOnePodInDifferentNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod1, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod1, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod1.config.Namespace = "test1"
-		testPod2, err := getTestPod2(lockPath)
+		testPod2, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		testPod2.config.Namespace = "test2"
@@ -2078,15 +2082,15 @@ func TestAllPodsOnePodInDifferentNamespace(t *testing.T) {
 }
 func TestPodHasContainerNoSuchPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.PodHasContainer(&Pod{config: &PodConfig{}}, strings.Repeat("0", 32))
 		assert.Error(t, err)
 	})
 }
 func TestPodHasContainerEmptyCtrID(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2098,8 +2102,8 @@ func TestPodHasContainerEmptyCtrID(t *testing.T) {
 }
 func TestPodHasContainerNoSuchCtr(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2112,11 +2116,11 @@ func TestPodHasContainerNoSuchCtr(t *testing.T) {
 }
 func TestPodHasContainerCtrNotInPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2132,11 +2136,11 @@ func TestPodHasContainerCtrNotInPod(t *testing.T) {
 }
 func TestPodHasContainerSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2154,8 +2158,8 @@ func TestPodHasContainerSucceeds(t *testing.T) {
 }
 func TestPodHasContainerPodNotInNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -2171,15 +2175,15 @@ func TestPodHasContainerPodNotInNamespaceFails(t *testing.T) {
 }
 func TestPodContainersByIDInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.PodContainersByID(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestPodContainerdByIDPodNotInState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		_, err = state.PodContainersByID(testPod)
@@ -2189,8 +2193,8 @@ func TestPodContainerdByIDPodNotInState(t *testing.T) {
 }
 func TestPodContainersByIDEmptyPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2203,11 +2207,11 @@ func TestPodContainersByIDEmptyPod(t *testing.T) {
 }
 func TestPodContainersByIDOneContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2226,19 +2230,19 @@ func TestPodContainersByIDOneContainer(t *testing.T) {
 }
 func TestPodContainersByIDMultipleContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
-		testCtr3, err := getTestCtrN("4", lockPath)
+		testCtr3, err := getTestCtrN("4", manager)
 		assert.NoError(t, err)
 		testCtr3.config.Pod = testPod.ID()
@@ -2273,8 +2277,8 @@ func TestPodContainersByIDMultipleContainers(t *testing.T) {
 }
 func TestPodContainerByIDPodNotInNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -2290,15 +2294,15 @@ func TestPodContainerByIDPodNotInNamespace(t *testing.T) {
 }
 func TestPodContainersInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		_, err := state.PodContainers(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestPodContainersPodNotInState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		_, err = state.PodContainers(testPod)
@@ -2308,8 +2312,8 @@ func TestPodContainersPodNotInState(t *testing.T) {
 }
 func TestPodContainersEmptyPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2322,11 +2326,11 @@ func TestPodContainersEmptyPod(t *testing.T) {
 }
 func TestPodContainersOneContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2346,19 +2350,19 @@ func TestPodContainersOneContainer(t *testing.T) {
 }
 func TestPodContainersMultipleContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
-		testCtr3, err := getTestCtrN("4", lockPath)
+		testCtr3, err := getTestCtrN("4", manager)
 		assert.NoError(t, err)
 		testCtr3.config.Pod = testPod.ID()
@@ -2393,8 +2397,8 @@ func TestPodContainersMultipleContainers(t *testing.T) {
 }
 func TestPodContainersPodNotInNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -2410,15 +2414,15 @@ func TestPodContainersPodNotInNamespace(t *testing.T) {
 }
 func TestRemovePodContainersInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		err := state.RemovePodContainers(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestRemovePodContainersPodNotInState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.RemovePodContainers(testPod)
@@ -2428,8 +2432,8 @@ func TestRemovePodContainersPodNotInState(t *testing.T) {
 }
 func TestRemovePodContainersNoContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2445,11 +2449,11 @@ func TestRemovePodContainersNoContainers(t *testing.T) {
 }
 func TestRemovePodContainersOneContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2469,15 +2473,15 @@ func TestRemovePodContainersOneContainer(t *testing.T) {
 }
 func TestRemovePodContainersPreservesCtrOutsidePod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2503,15 +2507,15 @@ func TestRemovePodContainersPreservesCtrOutsidePod(t *testing.T) {
 }
 func TestRemovePodContainersTwoContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
@@ -2534,15 +2538,15 @@ func TestRemovePodContainersTwoContainers(t *testing.T) {
 }
 func TestRemovePodContainerDependencyInPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2566,8 +2570,8 @@ func TestRemovePodContainerDependencyInPod(t *testing.T) {
 }
 func TestRemoveContainersNotInNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -2583,8 +2587,8 @@ func TestRemoveContainersNotInNamespace(t *testing.T) {
 }
 func TestAddContainerToPodInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.AddContainerToPod(&Pod{config: &PodConfig{}}, testCtr)
@@ -2593,8 +2597,8 @@ func TestAddContainerToPodInvalidPod(t *testing.T) {
 }
 func TestAddContainerToPodInvalidCtr(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2610,11 +2614,11 @@ func TestAddContainerToPodInvalidCtr(t *testing.T) {
 }
 func TestAddContainerToPodPodNotInState(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2625,11 +2629,11 @@ func TestAddContainerToPodPodNotInState(t *testing.T) {
 }
 func TestAddContainerToPodSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2653,15 +2657,15 @@ func TestAddContainerToPodSucceeds(t *testing.T) {
 }
 func TestAddContainerToPodTwoContainers(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
@@ -2685,15 +2689,15 @@ func TestAddContainerToPodTwoContainers(t *testing.T) {
 }
 func TestAddContainerToPodWithAddContainer(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -2718,14 +2722,14 @@ func TestAddContainerToPodWithAddContainer(t *testing.T) {
 }
 func TestAddContainerToPodCtrIDConflict(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestContainer(testCtr1.ID(), "testCtr3", lockPath)
+		testCtr2, err := getTestContainer(testCtr1.ID(), "testCtr3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
@@ -2749,14 +2753,14 @@ func TestAddContainerToPodCtrIDConflict(t *testing.T) {
 }
 func TestAddContainerToPodCtrNameConflict(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestContainer(strings.Repeat("4", 32), testCtr1.Name(), lockPath)
+		testCtr2, err := getTestContainer(strings.Repeat("4", 32), testCtr1.Name(), manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
@@ -2780,11 +2784,11 @@ func TestAddContainerToPodCtrNameConflict(t *testing.T) {
 }
 func TestAddContainerToPodPodIDConflict(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestContainer(testPod.ID(), "testCtr", lockPath)
+		testCtr, err := getTestContainer(testPod.ID(), "testCtr", manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2805,11 +2809,11 @@ func TestAddContainerToPodPodIDConflict(t *testing.T) {
 }
 func TestAddContainerToPodPodNameConflict(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), lockPath)
+		testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -2830,15 +2834,15 @@ func TestAddContainerToPodPodNameConflict(t *testing.T) {
 }
 func TestAddContainerToPodAddsDependencies(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2860,11 +2864,11 @@ func TestAddContainerToPodAddsDependencies(t *testing.T) {
 }
 func TestAddContainerToPodPodDependencyFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
 		testCtr.config.IPCNsCtr = testPod.ID()
@@ -2882,11 +2886,11 @@ func TestAddContainerToPodPodDependencyFails(t *testing.T) {
 }
 func TestAddContainerToPodBadDependencyFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
 		testCtr.config.IPCNsCtr = strings.Repeat("8", 32)
@@ -2904,14 +2908,14 @@ func TestAddContainerToPodBadDependencyFails(t *testing.T) {
 }
 func TestAddContainerToPodDependencyOutsidePodFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2940,17 +2944,17 @@ func TestAddContainerToPodDependencyOutsidePodFails(t *testing.T) {
 }
 func TestAddContainerToPodDependencyInSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
 		testCtr1.config.Namespace = "test1"
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2973,17 +2977,17 @@ func TestAddContainerToPodDependencyInSameNamespaceSucceeds(t *testing.T) {
 }
 func TestAddContainerToPodDependencyInSeparateNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
 		testCtr1.config.Namespace = "test1"
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3013,12 +3017,12 @@ func TestAddContainerToPodDependencyInSeparateNamespaceFails(t *testing.T) {
 }
 func TestAddContainerToPodSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
 		testCtr.config.Pod = testPod.ID()
@@ -3037,12 +3041,12 @@ func TestAddContainerToPodSameNamespaceSucceeds(t *testing.T) {
 }
 func TestAddContainerToPodDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test2"
 		testCtr.config.Pod = testPod.ID()
@@ -3060,11 +3064,11 @@ func TestAddContainerToPodDifferentNamespaceFails(t *testing.T) {
 }
 func TestAddContainerToPodNamespaceOnCtrFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
 		testCtr.config.Pod = testPod.ID()
@@ -3082,12 +3086,12 @@ func TestAddContainerToPodNamespaceOnCtrFails(t *testing.T) {
 }
 func TestAddContainerToPodNamespaceOnPodFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3104,11 +3108,11 @@ func TestAddContainerToPodNamespaceOnPodFails(t *testing.T) {
 }
 func TestAddCtrToPodSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testPod, err := getTestPod2(lockPath)
+		testPod, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
@@ -3131,11 +3135,11 @@ func TestAddCtrToPodSameNamespaceSucceeds(t *testing.T) {
 }
 func TestAddCtrToPodDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
-		testPod, err := getTestPod2(lockPath)
+		testPod, err := getTestPod2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Namespace = "test1"
@@ -3159,8 +3163,8 @@ func TestAddCtrToPodDifferentNamespaceFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodBadPodFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testCtr, err := getTestCtr1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testCtr, err := getTestCtr1(manager)
 		assert.NoError(t, err)
 		err = state.RemoveContainerFromPod(&Pod{config: &PodConfig{}}, testCtr)
@@ -3169,11 +3173,11 @@ func TestRemoveContainerFromPodBadPodFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodPodNotInStateFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3185,11 +3189,11 @@ func TestRemoveContainerFromPodPodNotInStateFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodCtrNotInStateFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3204,11 +3208,11 @@ func TestRemoveContainerFromPodCtrNotInStateFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodCtrNotInPodFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -3229,11 +3233,11 @@ func TestRemoveContainerFromPodCtrNotInPodFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3257,15 +3261,15 @@ func TestRemoveContainerFromPodSucceeds(t *testing.T) {
 }
 func TestRemoveContainerFromPodWithDependencyFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3293,15 +3297,15 @@ func TestRemoveContainerFromPodWithDependencyFails(t *testing.T) {
 }
 func TestRemoveContainerFromPodWithDependencySucceedsAfterDepRemoved(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
-		testCtr1, err := getTestCtr2(lockPath)
+		testCtr1, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr1.config.Pod = testPod.ID()
-		testCtr2, err := getTestCtrN("3", lockPath)
+		testCtr2, err := getTestCtrN("3", manager)
 		assert.NoError(t, err)
 		testCtr2.config.Pod = testPod.ID()
 		testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3332,13 +3336,13 @@ func TestRemoveContainerFromPodWithDependencySucceedsAfterDepRemoved(t *testing.
 }
 func TestRemoveContainerFromPodSameNamespaceSucceeds(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3366,13 +3370,13 @@ func TestRemoveContainerFromPodSameNamespaceSucceeds(t *testing.T) {
 }
 func TestRemoveContainerFromPodDifferentNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-		testCtr, err := getTestCtr2(lockPath)
+		testCtr, err := getTestCtr2(manager)
 		assert.NoError(t, err)
 		testCtr.config.Pod = testPod.ID()
@@ -3402,15 +3406,15 @@ func TestRemoveContainerFromPodDifferentNamespaceFails(t *testing.T) {
 }
 func TestUpdatePodInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		err := state.UpdatePod(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
 	})
 }
 func TestUpdatePodPodNotInStateFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.UpdatePod(testPod)
@@ -3419,8 +3423,8 @@ func TestUpdatePodPodNotInStateFails(t *testing.T) {
 }
 func TestUpdatePodNotInNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -3436,15 +3440,15 @@ func TestUpdatePodNotInNamespaceFails(t *testing.T) {
 }
 func TestSavePodInvalidPod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
 		err := state.SavePod(&Pod{config: &PodConfig{}})
 		assert.Error(t, err)
	})
 }
 func TestSavePodPodNotInStateFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.SavePod(testPod)
@@ -3453,8 +3457,8 @@ func TestSavePodPodNotInStateFails(t *testing.T) {
 }
 func TestSavePodNotInNamespaceFails(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
@@ -3470,8 +3474,8 @@ func TestSavePodNotInNamespaceFails(t *testing.T) {
 }
 func TestSaveAndUpdatePod(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		err = state.AddPod(testPod)
@@ -3495,8 +3499,8 @@ func TestSaveAndUpdatePod(t *testing.T) {
 }
 func TestSaveAndUpdatePodSameNamespace(t *testing.T) {
-	runForAllStates(t, func(t *testing.T, state State, lockPath string) {
-		testPod, err := getTestPod1(lockPath)
+	runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+		testPod, err := getTestPod1(manager)
 		assert.NoError(t, err)
 		testPod.config.Namespace = "test1"
-- 
cgit v1.2.3-54-g00ecf
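
The change above is mechanical: every test fixture that used to take a lockPath string now takes a lock.Manager, so the tests inject a lock allocator through an interface instead of building file locks from a directory path. What follows is a minimal, self-contained Go sketch of that dependency-injection pattern; the Locker/Manager interfaces, the in-memory manager, and the getTestCtr helper here are illustrative stand-ins, not libpod's actual lock package API or test helpers.

package main

import (
	"fmt"
	"sync"
)

// Locker and Manager are hypothetical stand-ins for the kind of interface
// the refactored tests receive via runForAllStates.
type Locker interface {
	ID() uint32
	Lock()
	Unlock()
}

type Manager interface {
	AllocateLock() (Locker, error)
}

// inMemoryLock satisfies Locker with a plain process-local mutex.
type inMemoryLock struct {
	id uint32
	mu sync.Mutex
}

func (l *inMemoryLock) ID() uint32 { return l.id }
func (l *inMemoryLock) Lock()      { l.mu.Lock() }
func (l *inMemoryLock) Unlock()    { l.mu.Unlock() }

// inMemoryManager hands out process-local locks; tests can inject it where
// production code would inject a shared-memory-backed manager.
type inMemoryManager struct {
	nextID uint32
}

func (m *inMemoryManager) AllocateLock() (Locker, error) {
	l := &inMemoryLock{id: m.nextID}
	m.nextID++
	return l, nil
}

// getTestCtr mirrors the shape of the patched helpers: it takes a Manager
// and allocates its lock from it, rather than taking a lock path string.
func getTestCtr(name string, manager Manager) (Locker, error) {
	lock, err := manager.AllocateLock()
	if err != nil {
		return nil, fmt.Errorf("allocating lock for %s: %v", name, err)
	}
	return lock, nil
}

func main() {
	manager := &inMemoryManager{}
	lock, err := getTestCtr("testCtr1", manager)
	if err != nil {
		panic(err)
	}
	lock.Lock()
	fmt.Println("allocated lock", lock.ID())
	lock.Unlock()
}

Because the fixtures now depend only on an interface, a process-local manager like the one sketched here can stand in for a shared-memory-backed one in environments where the latter is unavailable.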