author     Matthew Heon <matthew.heon@gmail.com>   2018-08-23 13:48:07 -0400
committer  Matthew Heon <matthew.heon@pm.me>       2019-01-04 09:51:09 -0500
commit     a364b656eaef1be5329abfd02d3fcd2dbcd37d64 (patch)
tree       454478de97e3d1831b6b64acdeaeafac9b9052e7 /libpod
parent     e73484c176839b2f2adf3d07cc09222a7b75bf69 (diff)
Add lock manager to libpod runtime
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'libpod')
-rw-r--r--   libpod/lock/lock.go                     11
-rw-r--r--   libpod/lock/shm/shm_lock.go              8
-rw-r--r--   libpod/lock/shm_lock_manager_linux.go   12
-rw-r--r--   libpod/runtime.go                      110
4 files changed, 95 insertions, 46 deletions
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 5258c641f..73c1fdcf7 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -41,11 +41,14 @@ type Locker interface {
// Lock locks the lock.
// This call MUST block until it successfully acquires the lock or
// encounters a fatal error.
- Lock() error
+ // All errors must be handled internally, as they are not returned. For
+ // the most part, panicking should be appropriate.
+ Lock()
// Unlock unlocks the lock.
- // A call to Unlock() on a lock that is already unlocked lock MUST
- // error.
- Unlock() error
+ // All errors must be handled internally, as they are not returned. For
+ // the most part, panicking should be appropriate.
+ // This includes unlocking locks which are already unlocked.
+ Unlock()
// Free deallocates the underlying lock, allowing its reuse by other
// pods and containers.
// The lock MUST still be usable after a Free() - some libpod instances
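With this change the Locker contract no longer returns errors from Lock() and Unlock(): implementations must handle failures internally, for the most part by panicking. A minimal caller-side sketch of the new contract, assuming only the interface above (the withLock helper is hypothetical and not part of this patch):

package example

import "github.com/containers/libpod/libpod/lock"

// withLock pairs Lock and Unlock under the new error-free Locker contract.
// Neither call returns an error; a fatal locking failure surfaces as a
// panic from the Locker implementation instead.
func withLock(l lock.Locker, fn func() error) error {
	l.Lock()         // blocks until the lock is acquired
	defer l.Unlock() // runs even if fn returns an error
	return fn()
}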
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 16d7f2008..3372a8c71 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -12,9 +12,13 @@ import (
"unsafe"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-const (
+var (
+ // BitmapSize is the size of the bitmap used when managing SHM locks.
+ // An SHM lock manager's max locks will be rounded up to a multiple of
+ // this number.
BitmapSize uint32 = uint32(C.bitmap_size_c)
)
@@ -51,6 +55,8 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
locks.maxLocks = uint32(lockStruct.num_locks)
locks.valid = true
+ logrus.Debugf("Initialized SHM lock manager at path %s", path)
+
return locks, nil
}
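The new comment on BitmapSize documents that an SHM lock manager's maximum lock count is rounded up to a multiple of that value, since locks are tracked in fixed-size bitmaps on the C side. A rough sketch of that rounding, using only what the comment states (the helper and the example value of 32 are illustrative; the real value comes from C.bitmap_size_c):

package example

// roundUpToBitmap shows how a requested lock count would be rounded up to
// the next multiple of BitmapSize. For example, with a bitmap size of 32,
// a request for 2048 locks stays at 2048, while 2050 becomes 2080.
func roundUpToBitmap(numLocks, bitmapSize uint32) uint32 {
	if numLocks%bitmapSize == 0 {
		return numLocks
	}
	return (numLocks/bitmapSize + 1) * bitmapSize
}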
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 974431a13..2c0ea611a 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -73,13 +73,17 @@ func (l *SHMLock) ID() uint32 {
}
// Lock acquires the lock.
-func (l *SHMLock) Lock() error {
- return l.manager.locks.LockSemaphore(l.lockID)
+func (l *SHMLock) Lock() {
+ if err := l.manager.locks.LockSemaphore(l.lockID); err != nil {
+ panic(err.Error())
+ }
}
// Unlock releases the lock.
-func (l *SHMLock) Unlock() error {
- return l.manager.locks.UnlockSemaphore(l.lockID)
+func (l *SHMLock) Unlock() {
+ if err := l.manager.locks.UnlockSemaphore(l.lockID); err != nil {
+ panic(err.Error())
+ }
}
// Free releases the lock, allowing it to be reused.
diff --git a/libpod/runtime.go b/libpod/runtime.go
index facbe5d66..238a7a9db 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -11,6 +11,7 @@ import (
is "github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/libpod/lock"
"github.com/containers/libpod/pkg/firewall"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
@@ -64,6 +65,11 @@ const (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
+
+ // DefaultSHMLockPath is the default path for SHM locks
+ DefaultSHMLockPath = "/libpod_lock"
+ // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+ DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
)
// A RuntimeOption is a functional option which alters the Runtime created by
@@ -86,6 +92,7 @@ type Runtime struct {
lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
+ lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
}
@@ -165,6 +172,7 @@ type RuntimeConfig struct {
// and all containers and pods will be visible.
// The default namespace is "".
Namespace string `toml:"namespace,omitempty"`
+
// InfraImage is the image a pod infra container will use to manage namespaces
InfraImage string `toml:"infra_image"`
// InfraCommand is the command run to start up a pod infra container
@@ -179,6 +187,10 @@ type RuntimeConfig struct {
EnablePortReservation bool `toml:"enable_port_reservation"`
// EnableLabeling indicates whether libpod will support container labeling
EnableLabeling bool `toml:"label"`
+
+ // NumLocks is the number of locks to make available for containers and
+ // pods.
+ NumLocks uint32 `toml:"num_locks,omitempty"`
}
// runtimeConfiguredFrom is a struct used during early runtime init to help
@@ -234,6 +246,7 @@ var (
InfraImage: DefaultInfraImage,
EnablePortReservation: true,
EnableLabeling: true,
+ NumLocks: 2048,
}
)
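NumLocks is read from the runtime's TOML configuration under the num_locks key and defaults to 2048. A hedged example of overriding it in a libpod configuration file (only the key name and default come from this patch; the file location is assumed):

# /etc/containers/libpod.conf (assumed location)
# Raise the number of SHM locks available to containers and pods.
# Omitting the key keeps the built-in default of 2048.
num_locks = 4096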
@@ -487,6 +500,56 @@ func makeRuntime(runtime *Runtime) (err error) {
}
}
+ // We now need to see if the system has restarted
+ // We check for the presence of a file in our tmp directory to verify this
+ // This check must be locked to prevent races
+ runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck")
+ runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive")
+ aliveLock, err := storage.GetLockfile(runtimeAliveLock)
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring runtime init lock")
+ }
+ // Acquire the lock and hold it until we return
+ // This ensures that no two processes will be in runtime.refresh at once
+ // TODO: we can't close the FD in this lock, so we should keep it around
+ // and use it to lock important operations
+ aliveLock.Lock()
+ locked := true
+ doRefresh := false
+ defer func() {
+ if locked {
+ aliveLock.Unlock()
+ }
+ }()
+ _, err = os.Stat(runtimeAliveFile)
+ if err != nil {
+ // If the file doesn't exist, we need to refresh the state
+ // This will trigger on first use as well, but refreshing an
+ // empty state only creates a single file
+ // As such, it's not really a performance concern
+ if os.IsNotExist(err) {
+ doRefresh = true
+ } else {
+ return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ }
+ }
+
+ // Set up the lock manager
+ var manager lock.Manager
+ lockPath := DefaultSHMLockPath
+ if rootless.IsRootless() {
+ lockPath = DefaultRootlessSHMLockPath
+ }
+ if doRefresh {
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ } else {
+ manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ }
+ if err != nil {
+ return errors.Wrapf(err, "error initializing SHM locking")
+ }
+ runtime.lockManager = manager
+
// Set up the state
switch runtime.config.StateType {
case InMemoryStateStore:
@@ -656,46 +719,19 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.firewallBackend = fwBackend
- // We now need to see if the system has restarted
- // We check for the presence of a file in our tmp directory to verify this
- // This check must be locked to prevent races
- runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck")
- runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive")
- aliveLock, err := storage.GetLockfile(runtimeAliveLock)
- if err != nil {
- return errors.Wrapf(err, "error acquiring runtime init lock")
- }
- // Acquire the lock and hold it until we return
- // This ensures that no two processes will be in runtime.refresh at once
- // TODO: we can't close the FD in this lock, so we should keep it around
- // and use it to lock important operations
- aliveLock.Lock()
- locked := true
- defer func() {
- if locked {
+ // If we need to refresh the state, do it now - things are guaranteed to
+ // be set up by now.
+ if doRefresh {
+ if os.Geteuid() != 0 {
aliveLock.Unlock()
- }
- }()
- _, err = os.Stat(runtimeAliveFile)
- if err != nil {
- // If the file doesn't exist, we need to refresh the state
- // This will trigger on first use as well, but refreshing an
- // empty state only creates a single file
- // As such, it's not really a performance concern
- if os.IsNotExist(err) {
- if os.Geteuid() != 0 {
- aliveLock.Unlock()
- locked = false
- if err2 := runtime.refreshRootless(); err2 != nil {
- return err2
- }
- } else {
- if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
- return err2
- }
+ locked = false
+ if err2 := runtime.refreshRootless(); err2 != nil {
+ return err2
}
} else {
- return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
+ return err2
+ }
}
}
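Taken together, makeRuntime now creates a fresh SHM lock manager when a refresh is needed (first use or after a reboot) and re-opens the existing segment otherwise, choosing /libpod_lock or /libpod_rootless_lock based on rootless mode. A hedged sketch of how a consumer might obtain a per-container lock from the new runtime.lockManager, assuming the Manager interface exposes an AllocateLock() (Locker, error) method (that method is not shown in this diff):

package example

import (
	"github.com/containers/libpod/libpod/lock"
	"github.com/pkg/errors"
)

// allocateContainerLock sketches how container creation could reserve a
// lock from the runtime's manager. AllocateLock is an assumed Manager
// method; only the Locker side (Lock/Unlock/Free) appears in this patch.
func allocateContainerLock(manager lock.Manager) (lock.Locker, error) {
	l, err := manager.AllocateLock()
	if err != nil {
		return nil, errors.Wrap(err, "error allocating lock for new container")
	}
	return l, nil
}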