author    Matthew Heon <matthew.heon@pm.me>  2019-02-18 16:20:02 -0500
committer Matthew Heon <matthew.heon@pm.me>  2019-02-21 10:51:42 -0500
commit    d2b77f8b3397b3ffbbade6e04e37b291105028aa (patch)
tree      40d0e6423be80a66e5ea848a89fe23d4fda90d5e
parent    e0a6873d78be969a50a1939f52c81264b9547ac0 (diff)
Do not make renumber shut down the runtime
The original intent behind the requirement was to ensure that, if two SHM lock structs were open at the same time, we should not make such a runtime available to the user, and should clean it up instead.

It turns out that we don't even need to open a second SHM lock struct - if we get an error mapping the first one due to a lock count mismatch, we can just delete it, and it cleans itself up when it errors. So there's no reason not to return a valid runtime.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
-rw-r--r--  cmd/podman/system_renumber.go          11
-rw-r--r--  docs/podman-system-renumber.1.md       29
-rw-r--r--  libpod/lock/in_memory_locks.go          4
-rw-r--r--  libpod/lock/lock.go                    11
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go   4
-rw-r--r--  libpod/options.go                      12
-rw-r--r--  libpod/runtime.go                      16
-rw-r--r--  libpod/runtime_renumber.go              5
8 files changed, 73 insertions(+), 19 deletions(-)
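The effect of the change is easiest to see from the caller's side: GetRuntimeRenumber now hands back a valid runtime, and the entrypoint shuts it down itself. Below is a minimal, self-contained sketch of that flow; only the GetRuntimeRenumber/Shutdown pattern comes from the diff, while the `Runtime` struct and `getRuntimeRenumber` here are illustrative stand-ins, not the real libpod API.

```go
package main

import "fmt"

// Runtime stands in for libpod's Runtime.
type Runtime struct{ valid bool }

// Shutdown releases runtime resources and marks the runtime invalid.
func (r *Runtime) Shutdown(force bool) error {
	r.valid = false
	return nil
}

// getRuntimeRenumber stands in for libpodruntime.GetRuntimeRenumber: after
// this commit it returns a fully valid runtime once renumbering is done.
func getRuntimeRenumber() (*Runtime, error) {
	return &Runtime{valid: true}, nil
}

func main() {
	r, err := getRuntimeRenumber()
	if err != nil {
		fmt.Println("error renumbering locks:", err)
		return
	}
	// The runtime is valid, so the caller owns the shutdown - the same
	// shape as the change in cmd/podman/system_renumber.go.
	if err := r.Shutdown(false); err != nil {
		fmt.Println(err)
	}
}
```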
diff --git a/cmd/podman/system_renumber.go b/cmd/podman/system_renumber.go
index 7f9436d5d..c8ce628b1 100644
--- a/cmd/podman/system_renumber.go
+++ b/cmd/podman/system_renumber.go
@@ -12,7 +12,8 @@ var (
renumberDescription = `
podman system renumber
- Migrate lock numbers to handle a change in maximum number of locks
+ Migrate lock numbers to handle a change in maximum number of locks.
+ Mandatory after the number of locks in libpod.conf is changed.
`
_renumberCommand = &cobra.Command{
@@ -34,11 +35,15 @@ func init() {
func renumberCmd(c *cliconfig.SystemRenumberValues) error {
// We need to pass one extra option to NewRuntime.
- // This will inform the OCI runtime to start
- _, err := libpodruntime.GetRuntimeRenumber(&c.PodmanCommand)
+ // This will inform the OCI runtime to start a renumber.
+ // That's controlled by the last argument to GetRuntime.
+ r, err := libpodruntime.GetRuntimeRenumber(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error renumbering locks")
}
+ if err := r.Shutdown(false); err != nil {
+ return err
+ }
return nil
}
diff --git a/docs/podman-system-renumber.1.md b/docs/podman-system-renumber.1.md
new file mode 100644
index 000000000..a88640d63
--- /dev/null
+++ b/docs/podman-system-renumber.1.md
@@ -0,0 +1,29 @@
+% podman-system-renumber(1) podman
+
+## NAME
+podman\-system\-renumber - Renumber container locks
+
+## SYNOPSIS
+**podman system renumber**
+
+## DESCRIPTION
+**podman system renumber** renumbers locks used by containers and pods.
+
+Each Podman container and pod is allocated a lock at creation time, up to a maximum number controlled by the **num_locks** parameter in **libpod.conf**.
+
+When all available locks are exhausted, no further containers or pods can be created until some existing ones are removed. This can be avoided by increasing the number of locks in **libpod.conf** and then running **podman system renumber** to prepare the new locks (and reallocate existing lock numbers to fit the new struct).
+
+**podman system renumber** must be run after any change to **num_locks** - failure to do so will result in errors when starting Podman, as the number of locks available conflicts with the configured number of locks.
+
+**podman system renumber** can also be used to migrate from Podman 1.0 and earlier, which used a different locking scheme, to the new locking model. This is not strictly required, but it is highly recommended, as deadlocks can otherwise occur.
+
+If possible, avoid calling **podman system renumber** while there are other Podman processes running.
+
+## SEE ALSO
+**podman(1)**, **libpod.conf(5)**
+
+## HISTORY
+February 2019, Originally compiled by Matt Heon (mheon at redhat dot com)
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index 7eb22328f..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -90,7 +90,9 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
-// FreeAllLocks frees all locks
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
func (m *InMemoryManager) FreeAllLocks() error {
for _, lock := range m.locks {
lock.allocated = false
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index b96393574..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,8 +24,19 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
// FreeAllLocks frees all allocated locks, in preparation for lock
// reallocation.
+ // As this deallocates all presently-held locks, this can be very
+ // dangerous - if there are other processes running that might be
+ // attempting to allocate new locks and free existing locks, we may
+ // encounter races leading to an inconsistent state.
+ // (This is in addition to the fact that FreeAllLocks instantly makes
+ // the state inconsistent simply by using it, and requires a full
+ // lock renumbering to restore consistency!).
+ // In short, this should only be used as part of unit tests, or lock
+ // renumbering, where reasonable guarantees about other processes can be
+ // made.
FreeAllLocks() error
}
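To make the warning on FreeAllLocks concrete, here is a minimal, self-contained sketch of an in-memory manager in the spirit of InMemoryManager; the types and fields are illustrative, not the real libpod definitions. Freeing every lock invalidates all outstanding allocations at once, which is only tolerable when the caller immediately renumbers, as the renumber path does.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// memoryManager is an illustrative stand-in for libpod's InMemoryManager.
type memoryManager struct {
	mu    sync.Mutex
	locks []bool // true = allocated
}

// AllocateLock hands out the first free lock number.
func (m *memoryManager) AllocateLock() (uint32, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	for i, used := range m.locks {
		if !used {
			m.locks[i] = true
			return uint32(i), nil
		}
	}
	return 0, errors.New("all locks are allocated")
}

// FreeAllLocks marks every lock free. Anyone still holding a lock number
// now aliases future allocations - hence the DANGEROUS warning: only call
// this during renumbering or in tests, when no other user can exist.
func (m *memoryManager) FreeAllLocks() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	for i := range m.locks {
		m.locks[i] = false
	}
	return nil
}

func main() {
	m := &memoryManager{locks: make([]bool, 4)}
	a, _ := m.AllocateLock()
	_ = m.FreeAllLocks()
	b, _ := m.AllocateLock() // reuses a's number: state is now inconsistent
	fmt.Println(a == b)      // true
}
```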
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 187661c8b..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,7 +71,9 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
-// FreeAllLocks frees all locks in the manager
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
func (m *SHMLockManager) FreeAllLocks() error {
return m.locks.DeallocateAllSemaphores()
}
diff --git a/libpod/options.go b/libpod/options.go
index 4a3dd582d..7c37fd65b 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -394,14 +394,10 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
-// WithRenumber instructs libpod to perform a lock renumbering instead of a
-// normal init.
-// When this is specified, no valid runtime will be returned by NewRuntime.
-// Instead, libpod will reinitialize lock numbers on all pods and containers,
-// shut down the runtime, and return.
-// Renumber is intended to be used from a dedicated entrypoint, where it will
-// handle a changed maximum number of locks and return, with the program
-// exiting after that.
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
func WithRenumber() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
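WithRenumber follows libpod's functional-options pattern: an option is a closure that mutates the Runtime during NewRuntime and refuses to run on an already-initialized runtime (the `rt.valid` check above). A minimal sketch of that pattern, with all names here being illustrative stand-ins:

```go
package main

import (
	"errors"
	"fmt"
)

// runtime is an illustrative stand-in for libpod's Runtime.
type runtime struct {
	valid      bool
	doRenumber bool
}

// runtimeOption mirrors libpod's RuntimeOption: a closure applied at init.
type runtimeOption func(*runtime) error

// withRenumber mirrors the shape of WithRenumber: it flags the runtime for
// a lock renumber during initialization, and rejects being applied to a
// runtime that has already finished init.
func withRenumber() runtimeOption {
	return func(rt *runtime) error {
		if rt.valid {
			return errors.New("option cannot be set on an initialized runtime")
		}
		rt.doRenumber = true
		return nil
	}
}

// newRuntime applies each option, then finishes initialization.
func newRuntime(opts ...runtimeOption) (*runtime, error) {
	rt := &runtime{}
	for _, opt := range opts {
		if err := opt(rt); err != nil {
			return nil, err
		}
	}
	// ... renumber locks here if rt.doRenumber is set ...
	rt.valid = true
	return rt, nil
}

func main() {
	rt, err := newRuntime(withRenumber())
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(rt.doRenumber) // true
}
```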
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 850df4fc9..94dbf37dd 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -95,8 +95,16 @@ type Runtime struct {
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
+ // doRenumber indicates that the runtime should perform a lock renumber
+ // during initialization.
+ // Once the runtime has been initialized and returned, this variable is
+ // unused.
doRenumber bool
+ // valid indicates whether the runtime is ready to use.
+ // valid is set to true when a runtime is returned from GetRuntime(),
+ // and remains true until the runtime is shut down (rendering its
+ // storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
}
@@ -784,7 +792,9 @@ func makeRuntime(runtime *Runtime) (err error) {
if err != nil {
return err
}
- } else if err == syscall.ERANGE && runtime.doRenumber {
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
@@ -806,7 +816,9 @@ func makeRuntime(runtime *Runtime) (err error) {
-// It breaks out of normal runtime init, and will not return a valid
-// runtime.
+// Afterwards, we continue with normal runtime init and return a valid
+// runtime.
if runtime.doRenumber {
- return runtime.renumberLocks()
+ if err := runtime.renumberLocks(); err != nil {
+ return err
+ }
}
// If we need to refresh the state, do it now - things are guaranteed to
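The switch from `err == syscall.ERANGE` to `errors.Cause(err) == syscall.ERANGE` is load-bearing: the SHM setup error arrives wrapped by github.com/pkg/errors, and a wrapped error never compares equal to the raw errno. A small sketch of the distinction, using the same errors package the diff already imports (the wrap message here is invented for illustration):

```go
package main

import (
	"fmt"
	"syscall"

	"github.com/pkg/errors"
)

func main() {
	// Pretend the SHM lock setup failed with a lock-count mismatch and was
	// wrapped on the way up, as libpod does with errors.Wrapf.
	err := errors.Wrapf(syscall.ERANGE, "error setting up SHM locks")

	fmt.Println(err == syscall.ERANGE)               // false: the wrapper hides it
	fmt.Println(errors.Cause(err) == syscall.ERANGE) // true: unwrapped cause
}
```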
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 04abc84d1..125cf0825 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -6,9 +6,6 @@ import (
// renumberLocks reassigns lock numbers for all containers and pods in the
// state.
-// It renders the runtime it is called on, and all container/pod/volume structs
-// from that runtime, unusable, and requires that a new runtime be initialized
-// after it is called.
// TODO: It would be desirable to make it impossible to call this until all
// other libpod sessions are dead.
// Possibly use a read-write file lock, with all non-renumber podmans owning the
@@ -56,5 +53,5 @@ func (r *Runtime) renumberLocks() error {
}
}
- return r.Shutdown(false)
+ return nil
}
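The TODO in renumberLocks suggests a concrete exclusion scheme: every normal libpod session holds a shared advisory lock on a well-known file, and renumber takes it exclusively, blocking until all other sessions exit. A minimal sketch with golang.org/x/sys/unix, assuming a hypothetical lock-file path (real libpod would pick one under its runtime directory):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical well-known lock file for illustration only.
	f, err := os.OpenFile("/tmp/libpod-renumber.lock", os.O_CREATE|os.O_RDWR, 0o600)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Normal podman sessions would take unix.LOCK_SH here. Renumber takes
	// LOCK_EX, which blocks until every shared holder has released.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		fmt.Println(err)
		return
	}
	defer unix.Flock(int(f.Fd()), unix.LOCK_UN)

	fmt.Println("no other libpod sessions: safe to renumber")
}
```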