author    Matthew Heon <matthew.heon@pm.me>  2019-02-15 09:34:37 -0500
committer Matthew Heon <matthew.heon@pm.me>  2019-02-21 10:51:42 -0500
commit    ca8ae877c12cbfa368d572ef6700d9e4a23d5b11 (patch)
tree      ff88667c11ccf69576a85fbccffab68fbc95b326
parent    9353a3e8ec3855f29c552dd1e0134dbaa3031efa (diff)
Remove locks from volumes
I was looking into why we have locks in volumes, and I'm fairly convinced they're unnecessary.

We don't have a state whose accesses we need to guard with locks and syncs. The only real purpose for the lock was to prevent concurrent removal of the same volume. Looking at the code, concurrent removal ought to be fine with a bit of reordering - one or the other might fail, but we will successfully evict the volume from the state.

Also, remove the 'prune' bool from RemoveVolume. None of our other API functions accept it, and it only served to toggle off more verbose error messages.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
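As a minimal sketch of the reordering the message describes (all names here - volumeState, teardownStorage, the error value - are stand-ins for illustration, not the real libpod code; the actual change is in libpod/runtime_volume_linux.go below): invalidate the struct, evict it from state, and only then tear down storage, so a repeated or racing remove either finds the volume already gone from state (a harmless no-op) or reports it as removed.

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    var errVolumeRemoved = errors.New("volume has already been removed")

    type volume struct {
    	name  string
    	valid bool
    }

    type volumeState struct {
    	mu   sync.Mutex // models the runtime lock; note there is no per-volume lock
    	vols map[string]*volume
    }

    func (s *volumeState) removeVolume(v *volume) error {
    	s.mu.Lock()
    	defer s.mu.Unlock()

    	if !v.valid {
    		if _, ok := s.vols[v.name]; !ok {
    			// Already evicted from state: a second remove is a no-op.
    			return nil
    		}
    		return errVolumeRemoved
    	}

    	// 1. Invalidate first, so other holders of this struct stop using it.
    	v.valid = false

    	// 2. Evict from state. After this the volume can no longer be looked
    	//    up, which is what makes the concurrent-removal race tolerable.
    	delete(s.vols, v.name)

    	// 3. Tear down storage last. Even if this step fails, the volume has
    	//    already been evicted from state, matching the commit's intent.
    	return teardownStorage(v)
    }

    func teardownStorage(v *volume) error { return nil } // stand-in

    func main() {
    	v := &volume{name: "myvol", valid: true}
    	s := &volumeState{vols: map[string]*volume{v.name: v}}
    	fmt.Println(s.removeVolume(v)) // <nil>
    	fmt.Println(s.removeVolume(v)) // <nil>: already evicted, tolerated
    }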
-rw-r--r--  libpod/boltdb_state.go           |  4
-rw-r--r--  libpod/boltdb_state_internal.go  |  7
-rw-r--r--  libpod/runtime_ctr.go            |  2
-rw-r--r--  libpod/runtime_renumber.go       |  6
-rw-r--r--  libpod/runtime_volume.go         | 13
-rw-r--r--  libpod/runtime_volume_linux.go   | 31
-rw-r--r--  libpod/volume.go                 |  5
-rw-r--r--  libpod/volume_internal.go        |  3
8 files changed, 25 insertions(+), 46 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 6214333b9..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1369,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
volName := []byte(volume.Name())
db, err := s.getDBCon()
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
- // Get the lock
- lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
- if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
- }
- volume.lock = lock
-
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 1055da75b..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
for _, v := range volumes {
if volume, err := runtime.state.Volume(v); err == nil {
- if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
logrus.Errorf("cleanup volume (%s): %v", v, err)
}
}
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index b27b5f1a8..d5279934e 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -12,6 +12,12 @@ import (
// It renders the runtime it is called on, and all container/pod/volume structs
// from that runtime, unusable, and requires that a new runtime be initialized
// after it is called.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podmans owning the
+// lock as read, renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
func (r *Runtime) RenumberLocks() error {
r.lock.Lock()
locked := true
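The TODO added above floats a shared/exclusive file lock for this. As a rough illustration of that idea only (it is not part of this commit, and the lock path is invented), every ordinary podman session could take a shared flock(2) on a well-known file, while a renumber demands an exclusive one and therefore blocks until every shared holder has exited:

    package sessionlock

    import (
    	"os"

    	"golang.org/x/sys/unix"
    )

    // sessionLockPath is a made-up location for the example.
    const sessionLockPath = "/run/libpod/sessions.lock"

    // AcquireSessionLock takes the lock shared for ordinary podman
    // sessions and exclusive for a renumber. Closing the returned
    // file releases the lock.
    func AcquireSessionLock(renumber bool) (*os.File, error) {
    	f, err := os.OpenFile(sessionLockPath, os.O_CREATE|os.O_RDWR, 0o600)
    	if err != nil {
    		return nil, err
    	}
    	how := unix.LOCK_SH // ordinary session: read lock
    	if renumber {
    		how = unix.LOCK_EX // renumber: write lock, waits for all readers
    	}
    	if err := unix.Flock(int(f.Fd()), how); err != nil {
    		f.Close()
    		return nil, err
    	}
    	return f, nil
    }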
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index beae50ac9..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- v.lock.Lock()
- defer v.lock.Unlock()
-
- return r.removeVolume(ctx, v, force, prune)
+ return r.removeVolume(ctx, v, force)
}
// RemoveVolumes removes a slice of volumes or all with a force bool
@@ -64,7 +61,7 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
+ if err := r.RemoveVolume(ctx, vol, force); err != nil {
return deletedVols, err
}
logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
- if err != ErrVolumeBeingUsed {
+ if err := r.RemoveVolume(ctx, vol, false); err != nil {
+ if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
pruneErrors = append(pruneErrors, err)
}
continue
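The comparison in the hunk above switches from a direct err != ErrVolumeBeingUsed check to errors.Cause(err), because removeVolume returns the sentinel wrapped via errors.Wrapf, and a wrapped error no longer compares equal to the sentinel itself. A tiny demonstration with github.com/pkg/errors (the sentinel here is a local stand-in for libpod's ErrVolumeBeingUsed):

    package main

    import (
    	"fmt"

    	"github.com/pkg/errors"
    )

    // Stand-in for libpod's ErrVolumeBeingUsed sentinel.
    var errVolumeBeingUsed = errors.New("volume is being used")

    func main() {
    	// removeVolume wraps the sentinel with context before returning it:
    	err := errors.Wrapf(errVolumeBeingUsed, "volume %s is being used", "myvol")

    	fmt.Println(err == errVolumeBeingUsed)                // false: wrapping breaks identity
    	fmt.Println(errors.Cause(err) == errVolumeBeingUsed)  // true: Cause unwraps the chain
    }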
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- lock, err := r.lockManager.AllocateLock()
- if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new volume")
- }
- volume.lock = lock
- volume.config.LockID = volume.lock.ID()
-
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
// removeVolume removes the specified volume from state, as well as tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
- return ErrNoSuchVolume
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ return nil
+ }
+ return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
- if prune {
- return ErrVolumeBeingUsed
- }
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
- if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
- }
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Set volume as invalid so it can no longer be used
- v.valid = false
+ // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ logrus.Debugf("Removed volume %s", v.Name())
return nil
}
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
package libpod
-import "github.com/containers/libpod/libpod/lock"
-
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
- lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
- // ID of this volume's lock
- LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 0de8a2350..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
- if !v.valid {
- return ErrNoSuchVolume
- }
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}