From d4b2f116018e1d8e6a3c4f80f30db45934428c6b Mon Sep 17 00:00:00 2001
From: Matthew Heon
Date: Fri, 24 Aug 2018 15:15:56 -0400
Subject: Convert pods to SHM locks

Signed-off-by: Matthew Heon
---
 libpod/boltdb_state_internal.go  |  5 ++---
 libpod/container_internal.go     |  3 +--
 libpod/lock/shm/shm_lock_test.go | 11 +++++++----
 libpod/pod.go                    |  7 +++++--
 libpod/pod_easyjson.go           | 12 ++++++++++++
 libpod/pod_internal.go           | 19 +++++++++----------
 libpod/runtime_ctr.go            |  6 +++++-
 libpod/runtime_pod_linux.go      |  8 ++++++++
 8 files changed, 49 insertions(+), 22 deletions(-)

diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 238bfa161..bffa83ffb 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -323,10 +323,9 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
         }
 
         // Get the lock
-        lockPath := filepath.Join(s.runtime.lockDir, string(id))
-        lock, err := storage.GetLockfile(lockPath)
+        lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID)
         if err != nil {
-                return errors.Wrapf(err, "error retrieving lockfile for pod %s", string(id))
+                return errors.Wrapf(err, "error retrieving lock for pod %s", string(id))
         }
         pod.lock = lock
 
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 856313b9d..cc4c36bc9 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -451,12 +451,11 @@ func (c *Container) refresh() error {
         }
 
         // We need to pick up a new lock
-        lock, err := c.runtime.lockManager.AllocateLock()
+        lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID)
         if err != nil {
                 return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
         }
         c.lock = lock
-        c.config.LockID = c.lock.ID()
 
         if err := c.save(); err != nil {
                 return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 7174253d0..bdf0b19e5 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -18,7 +18,7 @@ import (
 // We can at least verify that the locks work within the local process.
 
 // 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
-const numLocks uint32 = 4 * BitmapSize
+var numLocks uint32 = 4 * BitmapSize
 
 const lockPath = "/libpod_test"
 
@@ -155,7 +155,8 @@ func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
 func TestAllocateAllLocksSucceeds(t *testing.T) {
         runLockTest(t, func(t *testing.T, locks *SHMLocks) {
                 sems := make(map[uint32]bool)
-                for i := 0; i < numLocks; i++ {
+                var i uint32
+                for i = 0; i < numLocks; i++ {
                         sem, err := locks.AllocateSemaphore()
                         assert.NoError(t, err)
 
@@ -172,7 +173,8 @@ func TestAllocateTooManyLocksFails(t *testing.T) {
         runLockTest(t, func(t *testing.T, locks *SHMLocks) {
                 // Allocate all locks
-                for i := 0; i < numLocks; i++ {
+                var i uint32
+                for i = 0; i < numLocks; i++ {
                         _, err := locks.AllocateSemaphore()
                         assert.NoError(t, err)
                 }
@@ -187,7 +189,8 @@ func TestAllocateDeallocateCycle(t *testing.T) {
         runLockTest(t, func(t *testing.T, locks *SHMLocks) {
                 // Allocate all locks
-                for i := 0; i < numLocks; i++ {
+                var i uint32
+                for i = 0; i < numLocks; i++ {
                         _, err := locks.AllocateSemaphore()
                         assert.NoError(t, err)
                 }
diff --git a/libpod/pod.go b/libpod/pod.go
index 07f41f5c6..4ce697402 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -3,7 +3,7 @@ package libpod
 import (
         "time"
 
-        "github.com/containers/storage"
+        "github.com/containers/libpod/libpod/lock"
         "github.com/cri-o/ocicni/pkg/ocicni"
         "github.com/pkg/errors"
 )
@@ -26,7 +26,7 @@ type Pod struct {
 
         valid   bool
         runtime *Runtime
-        lock    storage.Locker
+        lock    lock.Locker
 }
 
 // PodConfig represents a pod's static configuration
@@ -60,6 +60,9 @@ type PodConfig struct {
 
         // Time pod was created
         CreatedTime time.Time `json:"created"`
+
+        // ID of the pod's lock
+        LockID uint32 `json:"lockID"`
 }
 
 // podState represents a pod's state
diff --git a/libpod/pod_easyjson.go b/libpod/pod_easyjson.go
index 8ea9a5e72..71862dad0 100644
--- a/libpod/pod_easyjson.go
+++ b/libpod/pod_easyjson.go
@@ -501,6 +501,8 @@ func easyjsonBe091417DecodeGithubComContainersLibpodLibpod4(in *jlexer.Lexer, ou
                         if data := in.Raw(); in.Ok() {
                                 in.AddError((out.CreatedTime).UnmarshalJSON(data))
                         }
+                case "lockID":
+                        out.LockID = uint32(in.Uint32())
                 default:
                         in.SkipRecursive()
                 }
@@ -675,6 +677,16 @@ func easyjsonBe091417EncodeGithubComContainersLibpodLibpod4(out *jwriter.Writer,
                 }
                 out.Raw((in.CreatedTime).MarshalJSON())
         }
+        {
+                const prefix string = ",\"lockID\":"
+                if first {
+                        first = false
+                        out.RawString(prefix[1:])
+                } else {
+                        out.RawString(prefix)
+                }
+                out.Uint32(uint32(in.LockID))
+        }
         out.RawByte('}')
 }
 
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 39a25c004..348dd2373 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -7,7 +7,6 @@ import (
         "strings"
         "time"
 
-        "github.com/containers/storage"
         "github.com/containers/storage/pkg/stringid"
         "github.com/pkg/errors"
         "github.com/sirupsen/logrus"
@@ -24,15 +23,6 @@ func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
         pod.state = new(podState)
         pod.runtime = runtime
 
-        // Path our lock file will reside at
-        lockPath := filepath.Join(lockDir, pod.config.ID)
-        // Grab a lockfile at the given path
-        lock, err := storage.GetLockfile(lockPath)
-        if err != nil {
-                return nil, errors.Wrapf(err, "error creating lockfile for new pod")
-        }
-        pod.lock = lock
-
         return pod, nil
 }
 
@@ -55,6 +45,8 @@ func (p *Pod) save() error {
 }
 
 // Refresh a pod's state after restart
+// This cannot lock any other pod, but may lock individual containers, as those
+// will have refreshed by the time pod refresh runs.
 func (p *Pod) refresh() error {
         // Need to to an update from the DB to pull potentially-missing state
         if err := p.runtime.state.UpdatePod(p); err != nil {
@@ -65,6 +57,13 @@ func (p *Pod) refresh() error {
                 return ErrPodRemoved
         }
 
+        // Retrieve the pod's lock
+        lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+        if err != nil {
+                return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
+        }
+        p.lock = lock
+
         // We need to recreate the pod's cgroup
         if p.config.UsePodCgroup {
                 switch p.runtime.config.CgroupManager {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index eb78e7d7d..5b8a7f759 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -81,7 +81,11 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
                 return nil, errors.Wrapf(err, "error allocating lock for new container")
         }
         ctr.lock = lock
-        ctr.config.LockID = c.lock.ID()
+        ctr.config.LockID = ctr.lock.ID()
+
+        ctr.valid = true
+        ctr.state.State = ContainerStateConfigured
+        ctr.runtime = r
 
         ctr.valid = true
         ctr.state.State = ContainerStateConfigured
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 95246449a..529c516c8 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -48,6 +48,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
                 pod.config.Name = name
         }
 
+        // Allocate a lock for the pod
+        lock, err := r.lockManager.AllocateLock()
+        if err != nil {
+                return nil, errors.Wrapf(err, "error allocating lock for new pod")
+        }
+        pod.lock = lock
+        pod.config.LockID = pod.lock.ID()
+
         pod.valid = true
 
         // Check CGroup parent sanity, and set it if it was not set
-- 
cgit v1.2.3-54-g00ecf
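
For readers unfamiliar with the lock-manager pattern this patch moves pods onto, the sketch below shows the allocate-on-create / retrieve-by-ID-on-load flow in isolation. It is a minimal, self-contained stand-in, not libpod code: the Manager and Locker interfaces, the in-memory memManager, and podConfig are hypothetical and only mirror the calls visible in the diff (AllocateLock, RetrieveLock, ID, and the persisted LockID field); the real manager hands out SHM-backed locks shared across processes.

package main

import (
	"fmt"
	"sync"
)

// Locker mirrors the subset of the lock interface the patch relies on:
// each lock exposes a stable numeric ID that can be stored in a config.
type Locker interface {
	ID() uint32
	Lock()
	Unlock()
}

// Manager mirrors the AllocateLock/RetrieveLock calls seen in the diff.
// This in-memory version is purely illustrative.
type Manager interface {
	AllocateLock() (Locker, error)
	RetrieveLock(id uint32) (Locker, error)
}

type memLock struct {
	id uint32
	mu sync.Mutex
}

func (l *memLock) ID() uint32 { return l.id }
func (l *memLock) Lock()      { l.mu.Lock() }
func (l *memLock) Unlock()    { l.mu.Unlock() }

type memManager struct {
	next  uint32
	locks map[uint32]*memLock
}

func newMemManager() *memManager {
	return &memManager{locks: make(map[uint32]*memLock)}
}

func (m *memManager) AllocateLock() (Locker, error) {
	l := &memLock{id: m.next}
	m.locks[m.next] = l
	m.next++
	return l, nil
}

func (m *memManager) RetrieveLock(id uint32) (Locker, error) {
	l, ok := m.locks[id]
	if !ok {
		return nil, fmt.Errorf("no lock allocated with ID %d", id)
	}
	return l, nil
}

// podConfig stands in for libpod's PodConfig: only the lock ID is
// persisted, never the lock object itself.
type podConfig struct {
	ID     string
	LockID uint32
}

func main() {
	var mgr Manager = newMemManager()

	// Creation path (runtime_pod_linux.go): allocate a fresh lock and
	// record its ID in the pod's config so it can be found again later.
	lock, err := mgr.AllocateLock()
	if err != nil {
		panic(err)
	}
	cfg := podConfig{ID: "examplepod", LockID: lock.ID()}

	// Load/refresh path (boltdb_state_internal.go, pod_internal.go):
	// instead of opening a per-pod lock file, re-attach to the existing
	// lock by its persisted ID.
	same, err := mgr.RetrieveLock(cfg.LockID)
	if err != nil {
		panic(err)
	}
	same.Lock()
	fmt.Printf("pod %s holds lock %d\n", cfg.ID, same.ID())
	same.Unlock()
}

The design choice the diff makes is visible in this flow: because only the uint32 LockID is serialized (see the PodConfig and pod_easyjson.go hunks), a restarted runtime can reattach to the same shared lock without relying on per-pod lock files on disk.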