From d4b2f116018e1d8e6a3c4f80f30db45934428c6b Mon Sep 17 00:00:00 2001
From: Matthew Heon
Date: Fri, 24 Aug 2018 15:15:56 -0400
Subject: Convert pods to SHM locks

Signed-off-by: Matthew Heon
---
 libpod/pod_internal.go | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

(limited to 'libpod/pod_internal.go')

diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 39a25c004..348dd2373 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -7,7 +7,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -24,15 +23,6 @@ func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
 	pod.state = new(podState)
 	pod.runtime = runtime
 
-	// Path our lock file will reside at
-	lockPath := filepath.Join(lockDir, pod.config.ID)
-	// Grab a lockfile at the given path
-	lock, err := storage.GetLockfile(lockPath)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating lockfile for new pod")
-	}
-	pod.lock = lock
-
 	return pod, nil
 }
 
@@ -55,6 +45,8 @@ func (p *Pod) save() error {
 }
 
 // Refresh a pod's state after restart
+// This cannot lock any other pod, but may lock individual containers, as those
+// will have refreshed by the time pod refresh runs.
 func (p *Pod) refresh() error {
 	// Need to do an update from the DB to pull potentially-missing state
 	if err := p.runtime.state.UpdatePod(p); err != nil {
@@ -65,6 +57,13 @@ func (p *Pod) refresh() error {
 		return ErrPodRemoved
 	}
 
+	// Retrieve the pod's lock
+	lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
+	}
+	p.lock = lock
+
 	// We need to recreate the pod's cgroup
 	if p.config.UsePodCgroup {
 		switch p.runtime.config.CgroupManager {
--
cgit v1.2.3-54-g00ecf

From 35361595f36728958e6abb2c37c22d79a776913a Mon Sep 17 00:00:00 2001
From: Matthew Heon
Date: Sun, 23 Sep 2018 14:01:29 -0400
Subject: Remove runtime lockDir and add in-memory lock manager

Remove runtime's lockDir as it is no longer needed after the lock rework.

Add a trivial in-memory lock manager for unit testing

Signed-off-by: Matthew Heon
---
 libpod/boltdb_state_internal.go       |  4 +-
 libpod/lock/in_memory_locks.go        | 91 +++++++++++++++++++++++++++++++++++
 libpod/lock/shm_lock_manager_linux.go |  2 +
 libpod/pod_internal.go                |  2 +-
 libpod/runtime.go                     | 13 +----
 libpod/runtime_pod_linux.go           |  2 +-
 libpod/runtime_volume_linux.go        |  9 ++--
 libpod/volume.go                      | 10 ++--
 8 files changed, 107 insertions(+), 26 deletions(-)
 create mode 100644 libpod/lock/in_memory_locks.go

(limited to 'libpod/pod_internal.go')

diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index bffa83ffb..29a7184c9 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -3,7 +3,6 @@ package libpod
 import (
 	"bytes"
 	"encoding/json"
-	"path/filepath"
 	"runtime"
 	"strings"
 
@@ -351,8 +350,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
 	}
 
 	// Get the lock
-	lockPath := filepath.Join(s.runtime.lockDir, string(name))
-	lock, err := storage.GetLockfile(lockPath)
+	lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
 	if err != nil {
 		return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
 	}
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
new file mode 100644
index 000000000..1df0d2b61
--- /dev/null
+++ b/libpod/lock/in_memory_locks.go
@@ -0,0 +1,91 @@
+package lock
+
+import (
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+// Mutex holds a single mutex and whether it has been allocated.
+type Mutex struct {
+	id        uint32
+	lock      sync.Mutex
+	allocated bool
+}
+
+// ID retrieves the ID of the mutex
+func (m *Mutex) ID() uint32 {
+	return m.id
+}
+
+// Lock locks the mutex
+func (m *Mutex) Lock() {
+	m.lock.Lock()
+}
+
+// Unlock unlocks the mutex
+func (m *Mutex) Unlock() {
+	m.lock.Unlock()
+}
+
+// Free deallocates the mutex to allow its reuse
+func (m *Mutex) Free() error {
+	m.allocated = false
+
+	return nil
+}
+
+// InMemoryManager is a lock manager that allocates and retrieves local-only
+// locks - that is, they are not multiprocess. This lock manager is intended
+// purely for unit and integration testing and should not be used in production
+// deployments.
+type InMemoryManager struct {
+	locks     []*Mutex
+	numLocks  uint32
+	localLock sync.Mutex
+}
+
+// NewInMemoryManager creates a new in-memory lock manager with the given number
+// of locks.
+func NewInMemoryManager(numLocks uint32) (Manager, error) {
+	if numLocks == 0 {
+		return nil, errors.Errorf("must provide a non-zero number of locks!")
+	}
+
+	manager := new(InMemoryManager)
+	manager.numLocks = numLocks
+	manager.locks = make([]*Mutex, numLocks)
+
+	var i uint32
+	for i = 0; i < numLocks; i++ {
+		lock := new(Mutex)
+		lock.id = i
+		manager.locks[i] = lock
+	}
+
+	return manager, nil
+}
+
+// AllocateLock allocates a lock from the manager.
+func (m *InMemoryManager) AllocateLock() (Locker, error) {
+	m.localLock.Lock()
+	defer m.localLock.Unlock()
+
+	for _, lock := range m.locks {
+		if !lock.allocated {
+			lock.allocated = true
+			return lock, nil
+		}
+	}
+
+	return nil, errors.Errorf("all locks have been allocated")
+}
+
+// RetrieveLock retrieves a lock from the manager.
+func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
+	if id >= m.numLocks {
+		return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks - 1)
+	}
+
+	return m.locks[id], nil
+}
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 2c0ea611a..3e8f4f3d2 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -12,6 +12,8 @@ type SHMLockManager struct {
 }
 
 // NewSHMLockManager makes a new SHMLockManager with the given number of locks.
+// Due to the underlying implementation, the exact number of locks created may
+// be greater than the number given here.
 func NewSHMLockManager(path string, numLocks uint32) (Manager, error) {
 	locks, err := shm.CreateSHMLock(path, numLocks)
 	if err != nil {
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 348dd2373..0f1f115e8 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -13,7 +13,7 @@ import (
 )
 
 // Creates a new, empty pod
-func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
+func newPod(runtime *Runtime) (*Pod, error) {
 	pod := new(Pod)
 	pod.config = new(PodConfig)
 	pod.config.ID = stringid.GenerateNonCryptoID()
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 7c665bbc2..a39547a43 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -84,7 +84,6 @@ type Runtime struct {
 	storageService *storageService
 	imageContext   *types.SystemContext
 	ociRuntime     *OCIRuntime
-	lockDir        string
 	netPlugin      ocicni.CNIPlugin
 	ociRuntimePath string
 	conmonPath     string
@@ -679,17 +678,6 @@ func makeRuntime(runtime *Runtime) (err error) {
 	}
 	runtime.ociRuntime = ociRuntime
 
-	// Make a directory to hold container lockfiles
-	lockDir := filepath.Join(runtime.config.TmpDir, "lock")
-	if err := os.MkdirAll(lockDir, 0755); err != nil {
-		// The directory is allowed to exist
-		if !os.IsExist(err) {
-			return errors.Wrapf(err, "error creating runtime lockfiles directory %s",
-				lockDir)
-		}
-	}
-	runtime.lockDir = lockDir
-
 	// Make the per-boot files directory if it does not exist
 	if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
 		// The directory is allowed to exist
@@ -732,6 +720,7 @@
 		if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
 			return err2
 		}
+
 	}
 }
 
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 529c516c8..c6d497c0c 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -23,7 +23,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 		return nil, ErrRuntimeStopped
 	}
 
-	pod, err := newPod(r.lockDir, r)
+	pod, err := newPod(r)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error creating pod")
 	}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 5cc0938f0..0727cfedf 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -8,7 +8,6 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
@@ -68,14 +67,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}
 	volume.config.MountPoint = fullVolPath
 
-	// Path our lock file will reside at
-	lockPath := filepath.Join(r.lockDir, volume.config.Name)
-	// Grab a lockfile at the given path
-	lock, err := storage.GetLockfile(lockPath)
+	lock, err := r.lockManager.AllocateLock()
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating lockfile for new volume")
+		return nil, errors.Wrapf(err, "error allocating lock for new volume")
 	}
 	volume.lock = lock
+	volume.config.LockID = volume.lock.ID()
 
 	volume.valid = true
 
diff --git a/libpod/volume.go b/libpod/volume.go
index b732e8aa7..026a3bf49 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,6 +1,6 @@
 package libpod
 
-import "github.com/containers/storage"
+import "github.com/containers/libpod/libpod/lock"
 
 // Volume is the type used to create named volumes
 // TODO: all volumes should be created using this and the Volume API
@@ -9,13 +9,17 @@ type Volume struct {
 	valid   bool
 	runtime *Runtime
 
-	lock storage.Locker
+	lock lock.Locker
 }
 
 // VolumeConfig holds the volume's config information
 //easyjson:json
 type VolumeConfig struct {
-	Name string `json:"name"`
+	// Name of the volume
+	Name string `json:"name"`
+	// ID of this volume's lock
+	LockID uint32 `json:"lockID"`
+
 	Labels     map[string]string `json:"labels"`
 	MountPoint string            `json:"mountPoint"`
 	Driver     string            `json:"driver"`
--
cgit v1.2.3-54-g00ecf
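
Taken together, the two patches above move pods and volumes from per-object
lockfiles on disk to locks handed out by a lock manager: an object allocates a
lock when it is created, persists only the numeric lock ID in its config, and
looks the lock up again by that ID after a restart. The sketch below
illustrates that lifecycle using the in-memory manager introduced in the second
patch. It is a minimal, hypothetical example: fakeVolume is an illustrative
stand-in rather than a libpod type, and it assumes the lock.Manager and
lock.Locker interfaces expose the methods used by the patches (AllocateLock,
RetrieveLock, ID, Lock, Unlock, Free).

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

// fakeVolume is a hypothetical stand-in for a pod or volume: like
// VolumeConfig in the patch above, it persists only the numeric lock ID.
type fakeVolume struct {
	lockID uint32
	locker lock.Locker
}

func main() {
	// Build an in-memory manager with a small, fixed pool of locks.
	manager, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// Creation time: allocate a lock and record its ID, as newVolume()
	// does with volume.config.LockID.
	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	vol := &fakeVolume{lockID: l.ID(), locker: l}

	// After a restart (or when reloading from the database), look the lock
	// up again by its stored ID, as Pod.refresh() and getVolumeFromDB() do.
	restored, err := manager.RetrieveLock(vol.lockID)
	if err != nil {
		panic(err)
	}
	vol.locker = restored

	// Normal operation: take and release the lock around state changes.
	vol.locker.Lock()
	fmt.Println("holding lock", vol.lockID)
	vol.locker.Unlock()

	// Removal: return the lock to the pool so it can be reused.
	if err := vol.locker.Free(); err != nil {
		panic(err)
	}
}

Persisting only the numeric lock ID is what lets the SHM-backed manager hand
the same lock back after a restart; the in-memory manager mirrors that
interface so the same code paths can be exercised in tests without touching
shared memory.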