author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2019-01-04 10:41:05 -0800
committer GitHub <noreply@github.com>  2019-01-04 10:41:05 -0800
commit    bf5f779331870d31863c486619daae3fcea458eb (patch)
tree      aafcacc17883a8df4734bed0aadbaca59a9882fe  /libpod/lock/shm/shm_lock.go
parent    6868b5aa1444404113bc6a4582203fbbf89490c2 (diff)
parent    56c5c89408f89fc3733692786d66eb44133b2c59 (diff)
Merge pull request #1235 from mheon/shm_locking
SHM locking for Libpod
Diffstat (limited to 'libpod/lock/shm/shm_lock.go')
-rw-r--r--  libpod/lock/shm/shm_lock.go  216
1 file changed, 216 insertions(+), 0 deletions(-)
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
new file mode 100644
index 000000000..3372a8c71
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.go
@@ -0,0 +1,216 @@
+package shm
+
+// #cgo LDFLAGS: -lrt -lpthread
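+// // -lrt provides the POSIX shared memory calls (shm_open/shm_unlink) and
+// // -lpthread the process-shared mutexes backing these locks.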
+// #include <stdlib.h>
+// #include "shm_lock.h"
+// const uint32_t bitmap_size_c = BITMAP_SIZE;
+import "C"
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // BitmapSize is the size of the bitmap used when managing SHM locks.
+ // An SHM lock manager's max locks will be rounded up to a multiple of
+ // this number.
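+ // For example, with a bitmap size of 32, a request for 50 locks would
+ // be rounded up to 64.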
+ BitmapSize uint32 = uint32(C.bitmap_size_c)
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct { // nolint
+ lockStruct *C.shm_struct_t
+ maxLocks uint32
+ valid bool
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ if numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ }
+
+ locks := new(SHMLocks)
+
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ var errCode C.int
+ lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored
+ return nil, syscall.Errno(-1 * errCode)
+ }
+
+ locks.lockStruct = lockStruct
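+ // num_locks is reported back by the C side, and reflects any rounding
+ // up to a multiple of the bitmap size.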
+ locks.maxLocks = uint32(lockStruct.num_locks)
+ locks.valid = true
+
+ logrus.Debugf("Initialized SHM lock manager at path %s", path)
+
+ return locks, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ if numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ }
+
+ locks := new(SHMLocks)
+
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ var errCode C.int
+ lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored
+ return nil, syscall.Errno(-1 * errCode)
+ }
+
+ locks.lockStruct = lockStruct
+ locks.maxLocks = numLocks
+ locks.valid = true
+
+ return locks, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM segment.
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ return locks.maxLocks
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ locks.valid = false
+
+ retCode := C.close_lock_shm(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ // This returns a signed 64-bit integer, so we have the full u32 range
+ // available for semaphore indexes, and can still return negative error
+ // codes.
+ retCode := C.allocate_semaphore(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return 0, syscall.Errno(-1 * retCode)
+ }
+
+ return uint32(retCode), nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ // For pthread mutexes, we have to guarantee lock and unlock happen in
+ // the same thread.
+ runtime.LockOSThread()
+
+ retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned; the lock was not taken, so release the
+ // thread pin before returning.
+ runtime.UnlockOSThread()
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to unlock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ // For pthread mutexes, we have to guarantee lock and unlock happen in
+ // the same thread.
+ // OK if we take multiple locks - UnlockOSThread() won't actually unlock
+ // until the number of calls equals the number of calls to
+ // LockOSThread()
+ runtime.UnlockOSThread()
+
+ return nil
+}
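
A minimal usage sketch of the API added by this commit (the import path and the "/libpod_lock_example" SHM name below are illustrative assumptions, not taken from this diff):

package main

import (
	"fmt"

	// Import path assumed from this repository's layout.
	"github.com/containers/libpod/libpod/lock/shm"
)

func main() {
	// Create a segment with at least 128 locks; the count may be rounded
	// up to a multiple of shm.BitmapSize.
	locks, err := shm.CreateSHMLock("/libpod_lock_example", 128)
	if err != nil {
		panic(err)
	}
	// Per the warning above, Close() is intended for testing/teardown only.
	defer locks.Close()

	// Reserve a semaphore, e.g. for a new container.
	sem, err := locks.AllocateSemaphore()
	if err != nil {
		panic(err)
	}

	// Guard a critical section with it.
	if err := locks.LockSemaphore(sem); err != nil {
		panic(err)
	}
	fmt.Printf("holding semaphore %d of %d\n", sem, locks.GetMaxLocks())
	if err := locks.UnlockSemaphore(sem); err != nil {
		panic(err)
	}

	// Return the semaphore to the free pool.
	if err := locks.DeallocateSemaphore(sem); err != nil {
		panic(err)
	}
}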