author    Matthew Heon <matthew.heon@pm.me>  2019-02-14 17:25:58 -0500
committer Matthew Heon <matthew.heon@pm.me>  2019-02-21 10:51:42 -0500
commit    7fdd20ae5a1ced1faceab9cb0a6e553343911a0b (patch)
tree      21bc568928dfaa8f12e3b616e0e72fda3f1f5a63 /libpod
parent    84feff2e06e9c3dd504be918f8dcf0b0a434a941 (diff)
Add initial version of renumber backend
Renumber is a way of renumbering container locks after the number of locks
available has changed. For now, renumber only works with containers.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
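The intended workflow, pieced together from the commit message and the new
RenumberLocks method below, is renumber-then-restart: the runtime that performs
the renumbering shuts itself down and must be re-created afterwards. A minimal,
hypothetical driver (NewRuntime options and error handling are simplified, and
this commit adds only the backend, not a CLI entry point):

package main

import (
	"log"

	"github.com/containers/libpod/libpod"
)

func main() {
	// Stand up a runtime against the existing on-disk state.
	runtime, err := libpod.NewRuntime()
	if err != nil {
		log.Fatalf("creating runtime: %v", err)
	}

	// Reassign every container's lock. RenumberLocks calls Shutdown
	// internally, so this runtime is unusable afterwards.
	if err := runtime.RenumberLocks(); err != nil {
		log.Fatalf("renumbering locks: %v", err)
	}

	// Any further work needs a fresh runtime.
	runtime, err = libpod.NewRuntime()
	if err != nil {
		log.Fatalf("re-creating runtime: %v", err)
	}
	_ = runtime
}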
Diffstat (limited to 'libpod')
-rw-r--r--  libpod/lock/in_memory_locks.go               |  9
-rw-r--r--  libpod/lock/lock.go                          |  3
-rw-r--r--  libpod/lock/shm/shm_lock.c                   | 30
-rw-r--r--  libpod/lock/shm/shm_lock.go                  | 16
-rw-r--r--  libpod/lock/shm/shm_lock.h                   |  1
-rw-r--r--  libpod/lock/shm/shm_lock_test.go             | 28
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go        |  5
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go  |  5
-rw-r--r--  libpod/runtime_renumber.go                   | 60
9 files changed, 150 insertions(+), 7 deletions(-)
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7eb22328f 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,12 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+
+// FreeAllLocks frees all locks
+func (m *InMemoryManager) FreeAllLocks() error {
+ for _, lock := range m.locks {
+ lock.allocated = false
+ }
+
+ return nil
+}
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..b96393574 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,9 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // FreeAllLocks frees all allocated locks, in preparation for lock
+ // reallocation.
+ FreeAllLocks() error
}
// Locker is similar to sync.Locker, but provides a method for freeing the lock
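A quick round trip through the Manager contract, using the in-memory backend
whose FreeAllLocks is shown above (constructor name per this package; treat
exact signatures as approximate). After FreeAllLocks, the allocator hands back
the lowest free slot again:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	manager, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// Take the first free slot.
	first, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated lock", first.ID())

	// Release every lock at once, as renumbering does.
	if err := manager.FreeAllLocks(); err != nil {
		panic(err)
	}

	// The previously used slot is available again, so the
	// allocator returns the same ID.
	second, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated lock", second.ID())
}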
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..367055823 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -407,6 +407,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+ int ret_code;
+ uint i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Iterate through all bitmaps and reset to unused
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ }
+
+ // Unlock the allocation control mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
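Why zeroing each bitmap frees everything: allocation scans the bitmaps for the
first clear bit, so an all-zero bitmap means every semaphore is available. An
illustrative Go model of that scheme (not the actual SHM code, which is the C
above):

package main

import (
	"fmt"
	"math/bits"
)

const bitmapSize = 32 // bits per bitmap, mirroring the C bitmap width

// allocate finds the first clear bit across the bitmaps, sets it, and
// returns the semaphore index it represents.
func allocate(bitmaps []uint32) (int, bool) {
	for i, bm := range bitmaps {
		if bm == ^uint32(0) {
			continue // this bitmap is full
		}
		bit := bits.TrailingZeros32(^bm) // first clear bit
		bitmaps[i] |= 1 << bit
		return i*bitmapSize + bit, true
	}
	return 0, false
}

// deallocateAll mirrors deallocate_all_semaphores: reset every bitmap.
func deallocateAll(bitmaps []uint32) {
	for i := range bitmaps {
		bitmaps[i] = 0
	}
}

func main() {
	bitmaps := make([]uint32, 2) // room for 64 semaphores
	idx, _ := allocate(bitmaps)
	fmt.Println("allocated semaphore", idx) // 0
	deallocateAll(bitmaps)
	idx, _ = allocate(bitmaps)
	fmt.Println("allocated semaphore", idx) // 0 again
}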
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.deallocate_all_semaphores(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno return from C
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
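The wrapper follows this file's usual cgo error convention: the C helper
returns 0 on success and a negated errno on failure, which Go converts back
into a syscall.Errno. The same conversion as a standalone sketch, without cgo:

package main

import (
	"fmt"
	"syscall"
)

// errnoFromRet converts a C-style return code into a Go error.
func errnoFromRet(retCode int32) error {
	if retCode < 0 {
		// Negative errno return from C
		return syscall.Errno(-retCode)
	}
	return nil
}

func main() {
	fmt.Println(errnoFromRet(0))                      // <nil>
	fmt.Println(errnoFromRet(-int32(syscall.EINVAL))) // invalid argument
}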
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
- // Ignore ENOENT (lock is not allocated)
- var i uint32
- for i = 0; i < numLocks; i++ {
- if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
- t.Fatalf("Error deallocating semaphore %d: %v", i, err)
- }
+ if err := locks.DeallocateAllSemaphores(); err != nil {
+ t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate a lock
+ locks1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Free all locks
+ err = locks.DeallocateAllSemaphores()
+ assert.NoError(t, err)
+
+ // Allocate another lock
+ locks2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.Equal(t, locks1, locks2)
+ })
+}
+
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..187661c8b 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,11 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
+// FreeAllLocks frees all locks in the manager
+func (m *SHMLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllSemaphores()
+}
+
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+ return fmt.Errorf("not supported")
+}
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..bc291156e
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,60 @@
+package libpod
+
+import (
+ "path/filepath"
+
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+)
+
+// RenumberLocks reassigns lock numbers for all containers, pods, and volumes in
+// the state.
+// It renders the runtime it is called on, and all container, pod, and volume
+// structs obtained from that runtime, unusable; a new runtime must be
+// initialized after it returns.
+func (r *Runtime) RenumberLocks() error {
+ r.lock.Lock()
+ locked := true
+ defer func() {
+ if locked {
+ r.lock.Unlock()
+ }
+ }()
+
+ runtimeAliveLock := filepath.Join(r.config.TmpDir, "alive.lck")
+ aliveLock, err := storage.GetLockfile(runtimeAliveLock)
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring runtime init lock")
+ }
+ aliveLock.Lock()
+ // It's OK to defer until Shutdown() has run, so no need to check locked
+ defer aliveLock.Unlock()
+
+ // Start off by deallocating all locks
+ if err := r.lockManager.FreeAllLocks(); err != nil {
+ return err
+ }
+
+ allCtrs, err := r.state.AllContainers()
+ if err != nil {
+ return err
+ }
+ for _, ctr := range allCtrs {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ }
+
+ ctr.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+ return err
+ }
+ }
+
+ r.lock.Unlock()
+ locked = false
+
+ return r.Shutdown(false)
+}
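Pods and volumes are explicitly out of scope here ("For now, renumber only
works with containers"), but the container loop above suggests the shape of
the extension. A hypothetical fragment for pods, assuming a RewritePodConfig
state method mirroring RewriteContainerConfig (neither that method nor pod
lock rewriting is part of this commit, and the fragment relies on libpod
internals, so it is illustrative only):

	// Hypothetical: not part of this commit. Assumes r.state gains a
	// RewritePodConfig counterpart to RewriteContainerConfig.
	allPods, err := r.state.AllPods()
	if err != nil {
		return err
	}
	for _, pod := range allPods {
		lock, err := r.lockManager.AllocateLock()
		if err != nil {
			return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
		}

		pod.config.LockID = lock.ID()

		// Write the new lock ID back to the state
		if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
			return err
		}
	}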