author     Matthew Heon <matthew.heon@pm.me>    2019-08-27 13:45:11 -0400
committer  Matthew Heon <matthew.heon@pm.me>    2019-08-28 11:35:00 -0400
commit     e563f4111600a6c5506e4953bf796783a097544f (patch)
tree       54cae975be3d0de2805d15d3305d74ef798ce799 /libpod/runtime_volume_linux.go
parent     f221c6101934fccbd6705f3b387aadc9ae710f66 (diff)
Re-add locks to volumes.
This will require a 'podman system renumber' after being applied to
get lock numbers for existing volumes. Add the DB backend code for
rewriting volume configs and use it for updating lock numbers as part
of 'system renumber'.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
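The renumbering side of the change is not included in the diff below, which
covers only volume creation and removal. As a rough sketch of what the commit
message describes, the volume portion of 'podman system renumber' might look
like the following; AllVolumes and RewriteVolumeConfig are assumed names for
the state methods involved, inferred from the message rather than shown in
this patch:

// Sketch only: reassign lock numbers to all existing volumes during
// 'podman system renumber'. AllVolumes and RewriteVolumeConfig are
// assumed names for the state methods the commit message describes.
func (r *Runtime) renumberVolumeLocks() error {
	allVols, err := r.state.AllVolumes()
	if err != nil {
		return errors.Wrapf(err, "error retrieving volumes from state")
	}
	for _, vol := range allVols {
		// Allocate a fresh lock, exactly as newVolume does below.
		lock, err := r.lockManager.AllocateLock()
		if err != nil {
			return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
		}
		vol.lock = lock
		vol.config.LockID = vol.lock.ID()
		// Persist the new lock number via the DB rewrite backend.
		if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil {
			return errors.Wrapf(err, "error rewriting config for volume %s", vol.Name())
		}
	}
	return nil
}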
Diffstat (limited to 'libpod/runtime_volume_linux.go')
-rw-r--r--  libpod/runtime_volume_linux.go  37
1 file changed, 33 insertions(+), 4 deletions(-)
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 84703787d..70296248c 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -28,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
 }

 // newVolume creates a new empty volume
-func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) {
 	volume, err := newVolume(r)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error creating volume")
@@ -68,6 +68,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}

 	volume.config.MountPoint = fullVolPath
+	lock, err := r.lockManager.AllocateLock()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error allocating lock for new volume")
+	}
+	volume.lock = lock
+	volume.config.LockID = volume.lock.ID()
+
+	defer func() {
+		if Err != nil {
+			if err := volume.lock.Free(); err != nil {
+				logrus.Errorf("Error freeing volume lock after failed creation: %v", err)
+			}
+		}
+	}()
+
 	volume.valid = true

 	// Add the volume to state
@@ -110,6 +125,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 				return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
 			}

+			logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
+
 			// TODO: do we want to set force here when removing
 			// containers?
 			// I'm inclined to say no, in case someone accidentally
@@ -128,12 +145,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 		return errors.Wrapf(err, "error removing volume %s", v.Name())
 	}

-	// Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+	var removalErr error
+
+	// Free the volume's lock
+	if err := v.lock.Free(); err != nil {
+		removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+	}
+
+	// Delete the mountpoint path of the volume, that is delete the volume
+	// from /var/lib/containers/storage/volumes
 	if err := v.teardownStorage(); err != nil {
-		return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+		if removalErr == nil {
+			removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+		} else {
+			logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+		}
 	}

 	defer v.newVolumeEvent(events.Remove)
 	logrus.Debugf("Removed volume %s", v.Name())
-	return nil
+	return removalErr
 }
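A note on the (_ *Volume, Err error) signature introduced above: naming the
error return lets the deferred function observe whether newVolume is failing,
so the freshly allocated lock is freed only on the error path. A stand-alone
sketch of the same pattern, with purely illustrative names:

// Illustration of the named-return cleanup pattern used by newVolume:
// the deferred function reads the named return value Err, so the lock
// is freed only when construction fails after allocation.
package main

import "fmt"

type lock struct{ id uint32 }

func (l *lock) Free() error { return nil }

func allocate() (*lock, error) { return &lock{id: 1}, nil }

func build(fail bool) (_ *lock, Err error) {
	l, err := allocate()
	if err != nil {
		return nil, err
	}
	defer func() {
		// Runs after the return values are set; Err is non-nil only
		// on the failure path, so success keeps the lock allocated.
		if Err != nil {
			if ferr := l.Free(); ferr != nil {
				fmt.Printf("error freeing lock %d: %v\n", l.id, ferr)
			}
		}
	}()
	if fail {
		return nil, fmt.Errorf("simulated failure after lock allocation")
	}
	return l, nil
}

func main() {
	if _, err := build(true); err != nil {
		fmt.Println("build failed, lock was freed:", err)
	}
}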