author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>    2019-08-28 11:25:12 -0700
committer GitHub <noreply@github.com>    2019-08-28 11:25:12 -0700
commit    bdf9e568134c37f02f662080cfe32f7ca98710c9 (patch)
tree      c310ec1ca3d1cdb3db69c2972fa8e2322a4badbe
parent    ced3ebb512e1b578184317e9ff6474f892d1c382 (diff)
parent    e563f4111600a6c5506e4953bf796783a097544f (diff)
Merge pull request #3893 from mheon/readd_volume_locks
Re-add volume locks
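In short: each named volume now carries its own lock, allocated from the runtime's lock manager and persisted in the volume's config as lockID. Creation allocates the lock (and frees it again if creation fails partway); removal takes the lock before tearing the volume down and frees it afterwards; container setup locks every named volume it attaches, so a volume cannot be removed out from under a container being created. Both state backends also gain a RewriteVolumeConfig method so lock renumbering can persist freshly allocated lock IDs.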
-rw-r--r--  libpod/boltdb_state.go           46
-rw-r--r--  libpod/boltdb_state_internal.go   7
-rw-r--r--  libpod/in_memory_state.go        20
-rw-r--r--  libpod/runtime_ctr.go            15
-rw-r--r--  libpod/runtime_renumber.go       17
-rw-r--r--  libpod/runtime_volume.go          4
-rw-r--r--  libpod/runtime_volume_linux.go   37
-rw-r--r--  libpod/state.go                  10
-rw-r--r--  libpod/volume.go                 36
-rw-r--r--  test/e2e/run_volume_test.go       8
10 files changed, 183 insertions(+), 17 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 176781f07..1de8d80c9 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -870,7 +870,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
}
db, err := s.getDBCon()
@@ -900,6 +900,50 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return err
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket([]byte(volume.Name()))
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name())
+ }
+
+ if err := volDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 408ef7224..6e4179835 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -449,6 +449,13 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
+ // Get the lock
+ lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+ }
+ volume.lock = lock
+
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 7c4abd25d..a9b735327 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -425,6 +425,26 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return nil
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment in state.go before using it.
+func (s *InMemoryState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ // If the volume does not exist, return error
+ stateVol, ok := s.volumes[volume.Name()]
+ if !ok {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name())
+ }
+
+ stateVol.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 92b2faefb..acd317d20 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -253,10 +253,13 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
// Go through named volumes and add them.
// If they don't exist they will be created using basic options.
+ // Maintain an array of them - we need to lock them later.
+ ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes))
for _, vol := range ctr.config.NamedVolumes {
// Check if it exists already
- _, err := r.state.Volume(vol.Name)
+ dbVol, err := r.state.Volume(vol.Name)
if err == nil {
+ ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
} else if errors.Cause(err) != config2.ErrNoSuchVolume {
@@ -275,6 +278,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name)
}
+
+ ctrNamedVolumes = append(ctrNamedVolumes, newVol)
}
if ctr.config.LogPath == "" && ctr.config.LogDriver != JournaldLogging {
@@ -291,6 +296,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
}
+ // Lock all named volumes we are adding ourselves to, to ensure we can't
+ // use a volume being removed.
+ for _, namedVol := range ctrNamedVolumes {
+ toLock := namedVol
+ toLock.lock.Lock()
+ defer toLock.lock.Unlock()
+ }
+
// Add the container to the state
// TODO: May be worth looking into recovering from name/ID collisions here
if ctr.config.Pod != "" {
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 735ffba34..9de2556b2 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -53,6 +53,23 @@ func (r *Runtime) renumberLocks() error {
return err
}
}
+ allVols, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range allVols {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ }
+
+ vol.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil {
+ return err
+ }
+ }
r.newSystemEvent(events.Renumber)
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index d05db936b..512e778a1 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -36,6 +36,10 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
return nil
}
}
+
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
return r.removeVolume(ctx, v, force)
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 84703787d..70296248c 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -28,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
}
// newVolume creates a new empty volume
-func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) {
volume, err := newVolume(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating volume")
@@ -68,6 +68,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error allocating lock for new volume")
+ }
+ volume.lock = lock
+ volume.config.LockID = volume.lock.ID()
+
+ defer func() {
+ if Err != nil {
+ if err := volume.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing volume lock after failed creation: %v", err)
+ }
+ }
+ }()
+
volume.valid = true
// Add the volume to state
@@ -110,6 +125,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
}
+ logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
+
// TODO: do we want to set force here when removing
// containers?
// I'm inclined to say no, in case someone accidentally
@@ -128,12 +145,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ var removalErr error
+
+ // Free the volume's lock
+ if err := v.lock.Free(); err != nil {
+ removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ }
+
+ // Delete the mountpoint path of the volume, that is delete the volume
+ // from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ } else {
+ logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ }
}
defer v.newVolumeEvent(events.Remove)
logrus.Debugf("Removed volume %s", v.Name())
- return nil
+ return removalErr
}
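The signature change to newVolume above (named return Err) exists so the deferred cleanup can see whether the function is failing: any error returned after the lock allocation makes Err non-nil, and the deferred closure then frees the just-allocated lock. A minimal, self-contained sketch of the pattern, with hypothetical allocateLock/Free stand-ins for libpod's lock manager:

package main

import (
	"errors"
	"fmt"
)

type lock struct{ id uint32 }

func (l *lock) Free() error {
	fmt.Printf("freed lock %d\n", l.id)
	return nil
}

func allocateLock() (*lock, error) { return &lock{id: 42}, nil }

// newVolume frees its lock again on any failure path after the
// allocation, because the deferred closure reads the named return Err.
func newVolume(fail bool) (_ string, Err error) {
	l, err := allocateLock()
	if err != nil {
		return "", err
	}
	defer func() {
		if Err != nil {
			if err := l.Free(); err != nil {
				fmt.Printf("error freeing lock after failed creation: %v\n", err)
			}
		}
	}()

	if fail {
		return "", errors.New("simulated failure adding volume to state")
	}
	return "myvol", nil
}

func main() {
	if _, err := newVolume(true); err != nil {
		fmt.Println("create failed:", err)
	}
}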
diff --git a/libpod/state.go b/libpod/state.go
index d0ad1a1f8..5d704e69a 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -115,12 +115,20 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
- // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
// It is subject to the same conditions as RewriteContainerConfig.
// Please do not use this unless you know what you're doing.
RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with volumes instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // The exception is that volumes do not have IDs, so only the volume's
+ // name cannot be altered.
+ // Please do not use this unless you know what you're doing.
+ RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
diff --git a/libpod/volume.go b/libpod/volume.go
index 74126b49b..abfa7b3f4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -2,6 +2,8 @@ package libpod
import (
"time"
+
+ "github.com/containers/libpod/libpod/lock"
)
// Volume is the type used to create named volumes
@@ -11,21 +13,35 @@ type Volume struct {
valid bool
runtime *Runtime
+ lock lock.Locker
}
// VolumeConfig holds the volume's config information
type VolumeConfig struct {
- // Name of the volume
+ // Name of the volume.
Name string `json:"name"`
-
- Labels map[string]string `json:"labels"`
- Driver string `json:"driver"`
- MountPoint string `json:"mountPoint"`
- CreatedTime time.Time `json:"createdAt,omitempty"`
- Options map[string]string `json:"options"`
- IsCtrSpecific bool `json:"ctrSpecific"`
- UID int `json:"uid"`
- GID int `json:"gid"`
+ // ID of the volume's lock.
+ LockID uint32 `json:"lockID"`
+ // Labels for the volume.
+ Labels map[string]string `json:"labels"`
+ // The volume driver. An empty string or "local" does not activate a
+ // volume driver; all other values do.
+ Driver string `json:"driver"`
+ // The location the volume is mounted at.
+ MountPoint string `json:"mountPoint"`
+ // Time the volume was created.
+ CreatedTime time.Time `json:"createdAt,omitempty"`
+ // Options to pass to the volume driver. For the local driver, this is
+ // a list of mount options. For other drivers, they are passed to the
+ // volume driver handling the volume.
+ Options map[string]string `json:"options"`
+ // Whether this volume was created for a specific container and will be
+ // removed with it.
+ IsCtrSpecific bool `json:"ctrSpecific"`
+ // UID the volume will be created as.
+ UID int `json:"uid"`
+ // GID the volume will be created as.
+ GID int `json:"gid"`
}
// Name retrieves the volume's name
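For reference, a small self-contained sketch of what the struct above serializes to once the lockID field is present. The struct is copied from the diff; the values, including the mount path, are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type VolumeConfig struct {
	Name          string            `json:"name"`
	LockID        uint32            `json:"lockID"`
	Labels        map[string]string `json:"labels"`
	Driver        string            `json:"driver"`
	MountPoint    string            `json:"mountPoint"`
	CreatedTime   time.Time         `json:"createdAt,omitempty"`
	Options       map[string]string `json:"options"`
	IsCtrSpecific bool              `json:"ctrSpecific"`
	UID           int               `json:"uid"`
	GID           int               `json:"gid"`
}

func main() {
	cfg := VolumeConfig{
		Name:        "testvol1",
		LockID:      42,
		Labels:      map[string]string{},
		Driver:      "local",
		MountPoint:  "/var/lib/containers/storage/volumes/testvol1/_data",
		CreatedTime: time.Date(2019, 8, 28, 11, 25, 12, 0, time.UTC),
		Options:     map[string]string{},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out)) // includes "lockID": 42 alongside the existing keys
}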
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 1e0b84310..abb93a149 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -154,4 +154,12 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
})
+
+ It("podman run with volume flag and multiple named volumes", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol1:/testvol1", "-v", "testvol2:/testvol2", ALPINE, "grep", "/testvol", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol1"))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol2"))
+ })
})