author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2019-05-07 19:50:54 +0200
committer  GitHub <noreply@github.com>  2019-05-07 19:50:54 +0200
commit     7b67c9601e1eab6c881ac44503c285c71b0a4a3a (patch)
tree       cf0802d1dee6beefd68e20b2a8345c9edeaec80f
parent     3b5ac1818f09514039fdfec20aa874ae68e6ab7c (diff)
parent     f881e32f12c9a30d903b15d8d51729b23928cfa3 (diff)
Merge pull request #3073 from mheon/force_lock_realloc
When refreshing after a reboot, force lock allocation
-rw-r--r--  libpod/container_internal.go            2
-rw-r--r--  libpod/lock/in_memory_locks.go         16
-rw-r--r--  libpod/lock/lock.go                     7
-rw-r--r--  libpod/lock/shm/shm_lock.c             55
-rw-r--r--  libpod/lock/shm/shm_lock.go            17
-rw-r--r--  libpod/lock/shm/shm_lock.h              1
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go  19
-rw-r--r--  libpod/pod_internal.go                  2
-rw-r--r--  test/e2e/refresh_test.go               74
9 files changed, 117 insertions(+), 76 deletions(-)
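
This merge changes the post-reboot refresh path so that a container or pod re-allocates its stored lock ID instead of merely retrieving the lock object. After a reboot the lock table comes back empty, so a refresh that only retrieves leaves the ID unmarked and a later AllocateLock() can hand the same ID to a new container. Below is a minimal, self-contained Go sketch of the contract the diffs implement; toyManager and its methods are illustrative stand-ins for libpod's lock.Manager, not the real types.

package main

import (
	"errors"
	"fmt"
)

// toyManager is an illustrative stand-in for libpod's lock.Manager; the
// real implementations are in in_memory_locks.go and
// shm_lock_manager_linux.go below.
type toyManager struct {
	allocated []bool
}

func newToyManager(numLocks uint32) *toyManager {
	return &toyManager{allocated: make([]bool, numLocks)}
}

// AllocateLock hands out the first free lock ID, as done when a container
// or pod is first created.
func (m *toyManager) AllocateLock() (uint32, error) {
	for id, used := range m.allocated {
		if !used {
			m.allocated[id] = true
			return uint32(id), nil
		}
	}
	return 0, errors.New("out of locks")
}

// FreeAllLocks clears the allocation table, which is effectively what
// happens to the lock state after a reboot.
func (m *toyManager) FreeAllLocks() {
	for id := range m.allocated {
		m.allocated[id] = false
	}
}

// AllocateAndRetrieveLock re-claims a specific stored lock ID, failing if
// it is out of range or already taken: the contract this merge adds.
func (m *toyManager) AllocateAndRetrieveLock(id uint32) error {
	if id >= uint32(len(m.allocated)) {
		return fmt.Errorf("lock ID %d is too large", id)
	}
	if m.allocated[id] {
		return fmt.Errorf("lock ID %d is already in use, cannot reallocate", id)
	}
	m.allocated[id] = true
	return nil
}

func main() {
	m := newToyManager(4)

	// A container is created and records its lock ID in its config.
	storedID, _ := m.AllocateLock()

	// Reboot: the lock table is cleared, but the container still
	// remembers storedID.
	m.FreeAllLocks()

	// refresh() now re-claims the stored ID rather than only retrieving
	// it, so the same ID cannot be handed out twice.
	fmt.Println("re-claim:", m.AllocateAndRetrieveLock(storedID))     // <nil>
	fmt.Println("double claim:", m.AllocateAndRetrieveLock(storedID)) // already in use
}
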
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 0b4e5763e..e6ffaa6d7 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -516,7 +516,7 @@ func (c *Container) refresh() error {
}
// We need to pick up a new lock
- lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID)
+ lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
}
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index 7c9605917..f3c842f89 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -90,6 +90,22 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+// AllocateAndRetrieveLock allocates a lock with the given ID (if not already in
+// use) and returns it.
+func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ if id >= m.numLocks {
+ return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
+ }
+
+ if m.locks[id].allocated {
+ return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
+ }
+
+ m.locks[id].allocated = true
+
+ return m.locks[id], nil
+}
+
// FreeAllLocks frees all locks.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index d6841646b..4e1e2e215 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,13 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // AllocateAndRetrieveLock marks the lock with the given UUID as in use
+ // and retrieves it.
+ // AllocateAndRetrieveLock will error if the lock in question has
+ // already been allocated.
+ // This is mostly used after a system restart to repopulate the list of
+ // locks in use.
+ AllocateAndRetrieveLock(id uint32) (Locker, error)
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// FreeAllLocks frees all allocated locks, in preparation for lock
// reallocation.
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index d11fce71a..047d3c417 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -354,6 +354,61 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
return -1 * ENOSPC;
}
+// Allocate the semaphore with the given ID.
+// Returns an error if the semaphore with this ID does not exist, or has already
+// been allocated.
+// Returns 0 on success, or negative errno values on failure.
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap, ret_code;
+ bitmap_t test_map;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+ test_map = 0x1 << index_in_bitmap;
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Check if the semaphore is allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return -1 * EEXIST;
+ }
+
+ // The semaphore is not allocated, allocate it
+ shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap | test_map;
+
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
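
allocate_given_semaphore locates the requested semaphore by splitting the semaphore index into a bitmap index and a bit offset, then allocates it with a check-and-set of that bit while holding the segment mutex. Below is a small, self-contained Go sketch of the same arithmetic; BITMAP_SIZE is assumed here to be 32 (one 32-bit bitmap per group of semaphores).

package main

import "fmt"

// bitmapSize mirrors BITMAP_SIZE in shm_lock.h, assumed here to be 32.
const bitmapSize = 32

func main() {
	// Worked example for semaphore index 37:
	// 37 / 32 = bitmap 1, 37 % 32 = bit 5, so the test mask is 1<<5 = 0x20.
	semIndex := uint32(37)
	bitmapIndex := semIndex / bitmapSize
	indexInBitmap := semIndex % bitmapSize
	testMap := uint32(1) << indexInBitmap
	fmt.Printf("sem %d -> bitmap %d, bit %d, mask %#x\n",
		semIndex, bitmapIndex, indexInBitmap, testMap)

	// allocate_given_semaphore then checks and sets that bit: a set bit
	// means the semaphore is already taken (EEXIST); otherwise the bit
	// is ORed in to mark it allocated.
	var bitmap uint32
	if bitmap&testMap != 0 {
		fmt.Println("already allocated")
	} else {
		bitmap |= testMap
		fmt.Printf("allocated, bitmap is now %#x\n", bitmap)
	}
}
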
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index e70ea8743..c21e9a221 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -134,6 +134,23 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
return uint32(retCode), nil
}
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid, an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
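
Like the other wrappers in shm_lock.go, AllocateGivenSemaphore treats a negative return from the C helper as a negated errno and converts it with syscall.Errno. A short, self-contained sketch of that convention follows; toErr is a hypothetical helper used only for illustration, not part of shm_lock.go.

package main

import (
	"fmt"
	"syscall"
)

// toErr mirrors the 0-on-success, negative-errno-on-failure convention of
// the C helpers; it is a hypothetical helper, not part of shm_lock.go.
func toErr(retCode int32) error {
	if retCode < 0 {
		return syscall.Errno(-retCode)
	}
	return nil
}

func main() {
	// allocate_given_semaphore reports an already-taken semaphore as
	// EEXIST and an out-of-range index as EINVAL.
	fmt.Println(toErr(-int32(syscall.EEXIST))) // file exists
	fmt.Println(toErr(-int32(syscall.EINVAL))) // invalid argument
	fmt.Println(toErr(0))                      // <nil>
}
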
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 58e4297e2..759f8178a 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -39,6 +39,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 8678958ee..5f31939f8 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -57,6 +57,25 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
return lock, nil
}
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, an error is returned.
+func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ lock := new(SHMLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if id >= m.locks.GetMaxLocks() {
+ return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
+ id, m.locks.GetMaxLocks()-1)
+ }
+
+ if err := m.locks.AllocateGivenSemaphore(id); err != nil {
+ return nil, err
+ }
+
+ return lock, nil
+}
+
// RetrieveLock retrieves a lock from the manager given its ID.
func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
lock := new(SHMLock)
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 25e4e77d7..1fcb5b1a6 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -56,7 +56,7 @@ func (p *Pod) refresh() error {
}
// Retrieve the pod's lock
- lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+ lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
if err != nil {
return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
}
diff --git a/test/e2e/refresh_test.go b/test/e2e/refresh_test.go
deleted file mode 100644
index 56c1d255e..000000000
--- a/test/e2e/refresh_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// +build !remoteclient
-
-package integration
-
-import (
- "os"
- "time"
-
- . "github.com/containers/libpod/test/utils"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Podman refresh", func() {
- var (
- tmpdir string
- err error
- podmanTest *PodmanTestIntegration
- )
-
- BeforeEach(func() {
- tmpdir, err = CreateTempDirInTempDir()
- if err != nil {
- os.Exit(1)
- }
- podmanTest = PodmanTestCreate(tmpdir)
- podmanTest.Setup()
- podmanTest.RestoreAllArtifacts()
- })
-
- AfterEach(func() {
- podmanTest.Cleanup()
- f := CurrentGinkgoTestDescription()
- processTestResult(f)
-
- })
-
- Specify("Refresh with no containers succeeds", func() {
- session := podmanTest.Podman([]string{"container", "refresh"})
- session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
- })
-
- Specify("Refresh with created container succeeds", func() {
- createSession := podmanTest.Podman([]string{"create", ALPINE, "ls"})
- createSession.WaitWithDefaultTimeout()
- Expect(createSession.ExitCode()).To(Equal(0))
- Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
-
- refreshSession := podmanTest.Podman([]string{"container", "refresh"})
- refreshSession.WaitWithDefaultTimeout()
- Expect(refreshSession.ExitCode()).To(Equal(0))
- Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
- })
-
- Specify("Refresh with running container restarts container", func() {
- createSession := podmanTest.Podman([]string{"run", "-dt", ALPINE, "top"})
- createSession.WaitWithDefaultTimeout()
- Expect(createSession.ExitCode()).To(Equal(0))
- Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
-
- // HACK: ensure container starts before we move on
- time.Sleep(1 * time.Second)
-
- refreshSession := podmanTest.Podman([]string{"container", "refresh"})
- refreshSession.WaitWithDefaultTimeout()
- Expect(refreshSession.ExitCode()).To(Equal(0))
- Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
- })
-})