path: root/libpod/runtime_volume_linux.go
author     Matthew Heon <matthew.heon@pm.me>   2019-02-15 09:34:37 -0500
committer  Matthew Heon <matthew.heon@pm.me>   2019-02-21 10:51:42 -0500
commit     ca8ae877c12cbfa368d572ef6700d9e4a23d5b11 (patch)
tree       ff88667c11ccf69576a85fbccffab68fbc95b326 /libpod/runtime_volume_linux.go
parent     9353a3e8ec3855f29c552dd1e0134dbaa3031efa (diff)
download   podman-ca8ae877c12cbfa368d572ef6700d9e4a23d5b11.tar.gz
           podman-ca8ae877c12cbfa368d572ef6700d9e4a23d5b11.tar.bz2
           podman-ca8ae877c12cbfa368d572ef6700d9e4a23d5b11.zip
Remove locks from volumes
I was looking into why we have locks in volumes, and I'm fairly convinced they're unnecessary. We don't have a state whose accesses we need to guard with locks and syncs. The only real purpose for the lock was to prevent concurrent removal of the same volume. Looking at the code, concurrent removal ought to be fine with a bit of reordering - one or the other might fail, but we will successfully evict the volume from the state.

Also, remove the 'prune' bool from RemoveVolume. None of our other API functions accept it, and it only served to toggle off more verbose error messages.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
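To make the concurrency argument concrete, here is a small standalone Go sketch. It is not libpod code: the toy state below merely stands in for libpod's volume database. It shows why evicting the volume from the state before tearing down storage tolerates two callers removing the same volume - whoever evicts it first proceeds to teardown, and the loser finds nothing left to remove and can treat that as success.

	// Toy model: two goroutines race to remove the same volume.
	// The state's own synchronization makes eviction atomic, so exactly
	// one caller "wins" and goes on to tear down storage; the other sees
	// the volume already gone and returns without error.
	package main

	import (
		"fmt"
		"sync"
	)

	type toyState struct {
		mu      sync.Mutex
		volumes map[string]bool
	}

	// removeVolume evicts the named volume and reports whether this caller
	// was the one that actually removed it.
	func (s *toyState) removeVolume(name string) bool {
		s.mu.Lock()
		defer s.mu.Unlock()
		if !s.volumes[name] {
			return false // already evicted by the other caller
		}
		delete(s.volumes, name)
		return true
	}

	func main() {
		s := &toyState{volumes: map[string]bool{"myvol": true}}
		var wg sync.WaitGroup
		for i := 0; i < 2; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				if s.removeVolume("myvol") {
					fmt.Printf("caller %d evicted the volume and tears down storage\n", id)
				} else {
					fmt.Printf("caller %d found it already removed and returns nil\n", id)
				}
			}(i)
		}
		wg.Wait()
	}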
Diffstat (limited to 'libpod/runtime_volume_linux.go')
-rw-r--r--   libpod/runtime_volume_linux.go   31
1 file changed, 13 insertions, 18 deletions
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}
 	volume.config.MountPoint = fullVolPath
 
-	lock, err := r.lockManager.AllocateLock()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error allocating lock for new volume")
-	}
-	volume.lock = lock
-	volume.config.LockID = volume.lock.ID()
-
 	volume.valid = true
 
 	// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 }
 
 // removeVolume removes the specified volume from state as well tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
 	if !v.valid {
-		return ErrNoSuchVolume
+		if ok, _ := r.state.HasVolume(v.Name()); !ok {
+			return nil
+		}
+		return ErrVolumeRemoved
 	}
 
 	deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
 		return err
 	}
 	if len(deps) != 0 {
-		if prune {
-			return ErrVolumeBeingUsed
-		}
 		depsStr := strings.Join(deps, ", ")
 		if !force {
 			return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
 		}
 	}
 
-	// Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
-	if err := v.teardownStorage(); err != nil {
-		return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
-	}
+	// Set volume as invalid so it can no longer be used
+	v.valid = false
 
 	// Remove the volume from the state
 	if err := r.state.RemoveVolume(v); err != nil {
 		return errors.Wrapf(err, "error removing volume %s", v.Name())
 	}
 
-	// Set volume as invalid so it can no longer be used
-	v.valid = false
+	// Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+	if err := v.teardownStorage(); err != nil {
+		return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+	}
+
+	logrus.Debugf("Removed volume %s", v.Name())
 
 	return nil
 }
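On the caller side, the dropped 'prune' bool can be emulated by inspecting the returned error. A hedged sketch follows: the loop and the volumesToPrune variable are hypothetical, and it assumes the public RemoveVolume takes the same (ctx, volume, force) arguments as the internal removeVolume in the diff; ErrVolumeBeingUsed is the error shown above, and errors.Cause comes from github.com/pkg/errors, which this file already uses via errors.Wrapf.

	// Hypothetical prune loop: try to remove each candidate volume and
	// quietly skip the ones still used by containers, which is all the old
	// prune flag did (suppress the verbose in-use error).
	for _, vol := range volumesToPrune {
		if err := r.RemoveVolume(ctx, vol, false); err != nil {
			if errors.Cause(err) == ErrVolumeBeingUsed {
				continue // volume is in use; skip it
			}
			return err
		}
	}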