-rw-r--r--  docs/podman-volume-create.1.md           2
-rw-r--r--  libpod/boltdb_state.go                 162
-rw-r--r--  libpod/boltdb_state_internal.go          8
-rw-r--r--  libpod/container_internal.go            86
-rw-r--r--  libpod/container_internal_linux.go      57
-rw-r--r--  libpod/define/config.go                  4
-rw-r--r--  libpod/in_memory_state.go               30
-rw-r--r--  libpod/runtime_volume_linux.go          25
-rw-r--r--  libpod/state.go                          4
-rw-r--r--  libpod/volume.go                        30
-rw-r--r--  libpod/volume_internal.go               24
-rw-r--r--  libpod/volume_internal_linux.go        128
-rw-r--r--  libpod/volume_internal_unsupported.go   15
-rw-r--r--  pkg/adapter/runtime.go                   2
-rw-r--r--  test/e2e/run_volume_test.go             59
-rw-r--r--  test/e2e/volume_create_test.go           6

16 files changed, 602 insertions, 40 deletions
diff --git a/docs/podman-volume-create.1.md b/docs/podman-volume-create.1.md
index 3d3eb68b8..6612b0ad2 100644
--- a/docs/podman-volume-create.1.md
+++ b/docs/podman-volume-create.1.md
@@ -39,6 +39,8 @@ $ podman volume create myvol
$ podman volume create
$ podman volume create --label foo=bar myvol
+
+$ podman volume create --opt device=tmpfs --opt type=tmpfs --opt o=nodev,noexec myvol
```
## SEE ALSO
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 1de8d80c9..31b551b36 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -159,6 +159,16 @@ func (s *BoltState) Refresh() error {
return err
}
+ allVolsBucket, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Iterate through all IDs. Check if they are containers.
// If they are, unmarshal their state, and then clear
// PID, mountpoint, and state for all of them
@@ -235,6 +245,44 @@ func (s *BoltState) Refresh() error {
return nil
})
+ if err != nil {
+ return err
+ }
+
+ // Now refresh volumes
+ err = allVolsBucket.ForEach(func(id, name []byte) error {
+ dbVol := volBucket.Bucket(id)
+ if dbVol == nil {
+ return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ }
+
+ // Get the state
+ volStateBytes := dbVol.Get(stateKey)
+ if volStateBytes == nil {
+ // If the volume doesn't have a state, nothing to do
+ return nil
+ }
+
+ oldState := new(VolumeState)
+
+ if err := json.Unmarshal(volStateBytes, oldState); err != nil {
+ return errors.Wrapf(err, "error unmarshalling state for volume %s", string(id))
+ }
+
+ // Reset mount count to 0
+ oldState.MountCount = 0
+
+ newState, err := json.Marshal(oldState)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling state for volume %s", string(id))
+ }
+
+ if err := dbVol.Put(stateKey, newState); err != nil {
+ return errors.Wrapf(err, "error storing new state for volume %s", string(id))
+ }
+
+ return nil
+ })
return err
})
return err
@@ -1352,6 +1400,15 @@ func (s *BoltState) AddVolume(volume *Volume) error {
return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name())
}
+ // Volume state is allowed to not exist
+ var volStateJSON []byte
+ if volume.state != nil {
+ volStateJSON, err = json.Marshal(volume.state)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ }
+ }
+
db, err := s.getDBCon()
if err != nil {
return err
@@ -1392,6 +1449,12 @@ func (s *BoltState) AddVolume(volume *Volume) error {
return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name())
}
+ if volStateJSON != nil {
+ if err := newVol.Put(stateKey, volStateJSON); err != nil {
+ return errors.Wrapf(err, "error storing volume %s state in DB", volume.Name())
+ }
+ }
+
if err := allVolsBkt.Put(volName, volName); err != nil {
return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name())
}
@@ -1483,6 +1546,103 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return err
}
+// UpdateVolume updates the volume's state from the database.
+func (s *BoltState) UpdateVolume(volume *Volume) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ newState := new(VolumeState)
+ volumeName := []byte(volume.Name())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volToUpdate := volBucket.Bucket(volumeName)
+ if volToUpdate == nil {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ }
+
+ stateBytes := volToUpdate.Get(stateKey)
+ if stateBytes == nil {
+ // Having no state is valid.
+ // Return nil, use the empty state.
+ return nil
+ }
+
+ if err := json.Unmarshal(stateBytes, newState); err != nil {
+ return errors.Wrapf(err, "error unmarshalling volume %s state", volume.Name())
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ volume.state = newState
+
+ return nil
+}
+
+// SaveVolume saves the volume's state to the database.
+func (s *BoltState) SaveVolume(volume *Volume) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ volumeName := []byte(volume.Name())
+
+ var newStateJSON []byte
+ if volume.state != nil {
+ stateJSON, err := json.Marshal(volume.state)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ }
+ newStateJSON = stateJSON
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volToUpdate := volBucket.Bucket(volumeName)
+ if volToUpdate == nil {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ }
+
+ return volToUpdate.Put(stateKey, newStateJSON)
+ })
+ return err
+}
+
// AllVolumes returns all volumes present in the state
func (s *BoltState) AllVolumes() ([]*Volume, error) {
if !s.valid {
@@ -1517,6 +1677,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
volume := new(Volume)
volume.config = new(VolumeConfig)
+ volume.state = new(VolumeState)
if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
if errors.Cause(err) != define.ErrNSMismatch {
@@ -1551,6 +1712,7 @@ func (s *BoltState) Volume(name string) (*Volume, error) {
volume := new(Volume)
volume.config = new(VolumeConfig)
+ volume.state = new(VolumeState)
db, err := s.getDBCon()
if err != nil {
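
The new UpdateVolume()/SaveVolume() pair supports a read-modify-write cycle on a volume's mutable state while the volume lock is held. A minimal sketch of that pattern, written as a hypothetical helper inside package libpod (it mirrors how mount() and unmount() use these methods later in this change):

```
// bumpMountCount is illustrative only; the real logic lives in
// Volume.mount() in libpod/volume_internal_linux.go.
func bumpMountCount(v *Volume) error {
	v.lock.Lock()
	defer v.lock.Unlock()

	// Re-read state from the database so the counter is current even if
	// another libpod process changed it.
	if err := v.runtime.state.UpdateVolume(v); err != nil {
		return err
	}

	v.state.MountCount++

	// Persist the new counter for other processes to see.
	return v.runtime.state.SaveVolume(v)
}
```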
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 6e4179835..a50fce31e 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -449,6 +449,14 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
+ // Volume state is allowed to be nil for legacy compatibility
+ volStateBytes := volDB.Get(stateKey)
+ if volStateBytes != nil {
+ if err := json.Unmarshal(volStateBytes, volume.state); err != nil {
+ return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name))
+ }
+ }
+
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f51b53e85..ffc6c11ee 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1205,7 +1205,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
// TODO: Add ability to override mount label so we can use this for Mount() too
// TODO: Can we use this for export? Copying SHM into the export might not be
// good
-func (c *Container) mountStorage() (string, error) {
+func (c *Container) mountStorage() (_ string, Err error) {
var err error
// Container already mounted, nothing to do
if c.state.Mounted {
@@ -1225,6 +1225,40 @@ func (c *Container) mountStorage() (string, error) {
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
}
+ defer func() {
+ if Err != nil {
+ if err := c.unmountSHM(c.config.ShmDir); err != nil {
+ logrus.Errorf("Error unmounting SHM for container %s after mount error: %v", c.ID(), err)
+ }
+ }
+ }()
+ }
+
+ // Request a mount of all named volumes
+ for _, v := range c.config.NamedVolumes {
+ vol, err := c.runtime.state.Volume(v.Name)
+ if err != nil {
+ return "", errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ }
+
+ if vol.needsMount() {
+ vol.lock.Lock()
+ if err := vol.mount(); err != nil {
+ vol.lock.Unlock()
+ return "", errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID())
+ }
+ vol.lock.Unlock()
+ defer func() {
+ if Err == nil {
+ return
+ }
+ vol.lock.Lock()
+ if err := vol.unmount(false); err != nil {
+ logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err)
+ }
+ vol.lock.Unlock()
+ }()
+ }
}
// TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts
@@ -1247,14 +1281,19 @@ func (c *Container) cleanupStorage() error {
return nil
}
+ var cleanupErr error
+
for _, containerMount := range c.config.Mounts {
if err := c.unmountSHM(containerMount); err != nil {
- return err
+ if cleanupErr != nil {
+ logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ }
+ cleanupErr = err
}
}
if c.config.Rootfs != "" {
- return nil
+ return cleanupErr
}
if err := c.unmount(false); err != nil {
@@ -1264,19 +1303,52 @@ func (c *Container) cleanupStorage() error {
// state
if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
logrus.Errorf("Storage for container %s has been removed", c.ID())
- return nil
+ } else {
+ if cleanupErr != nil {
+ logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr)
+ }
+ cleanupErr = err
}
+ }
- return err
+ // Request an unmount of all named volumes
+ for _, v := range c.config.NamedVolumes {
+ vol, err := c.runtime.state.Volume(v.Name)
+ if err != nil {
+ if cleanupErr != nil {
+ logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ }
+ cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+
+ // We need to try and unmount every volume, so continue
+ // if they fail.
+ continue
+ }
+
+ if vol.needsMount() {
+ vol.lock.Lock()
+ if err := vol.unmount(false); err != nil {
+ if cleanupErr != nil {
+ logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ }
+ cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID())
+ }
+ vol.lock.Unlock()
+ }
}
c.state.Mountpoint = ""
c.state.Mounted = false
if c.valid {
- return c.save()
+ if err := c.save(); err != nil {
+ if cleanupErr != nil {
+ logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ }
+ cleanupErr = err
+ }
}
- return nil
+ return cleanupErr
}
// Unmount a container and free its resources
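
mountStorage() above switches to a named error return (Err) so its deferred handlers can tell whether the function is failing and roll back the SHM and named-volume mounts it already performed. The idiom, reduced to a self-contained sketch with placeholder steps (stepOne/stepTwo/undoStepOne are not real libpod functions):

```
package main

import "log"

func stepOne() error     { return nil }
func undoStepOne() error { return nil }
func stepTwo() error     { return nil }

// setupWithRollback undoes stepOne only if a later step fails, by checking
// the named return value Err from a deferred function.
func setupWithRollback() (Err error) {
	if err := stepOne(); err != nil {
		return err
	}
	defer func() {
		if Err != nil {
			if err := undoStepOne(); err != nil {
				// Log the rollback failure; keep the original error.
				log.Printf("rolling back step one: %v", err)
			}
		}
	}()
	return stepTwo()
}

func main() {
	if err := setupWithRollback(); err != nil {
		log.Fatal(err)
	}
}
```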
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index cb19b5484..a84db9aa5 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -49,19 +49,19 @@ func (c *Container) mountSHM(shmOptions string) error {
}
func (c *Container) unmountSHM(mount string) error {
- if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {
+ if err := unix.Unmount(mount, 0); err != nil {
if err != syscall.EINVAL {
- logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err)
- } else {
- logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
+ return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
}
+ // If it's just an EINVAL, debug logs only
+ logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
}
return nil
}
// prepare mounts the container and sets up other required resources like net
// namespaces
-func (c *Container) prepare() (err error) {
+func (c *Container) prepare() (Err error) {
var (
wg sync.WaitGroup
netNS ns.NetNS
@@ -108,31 +108,42 @@ func (c *Container) prepare() (err error) {
logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
}()
- defer func() {
- if err != nil {
- if err2 := c.cleanupNetwork(); err2 != nil {
- logrus.Errorf("Error cleaning up container %s network: %v", c.ID(), err2)
- }
- if err2 := c.cleanupStorage(); err2 != nil {
- logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), err2)
- }
- }
- }()
-
wg.Wait()
+ var createErr error
if createNetNSErr != nil {
- if mountStorageErr != nil {
- logrus.Error(createNetNSErr)
- return mountStorageErr
- }
- return createNetNSErr
+ createErr = createNetNSErr
}
if mountStorageErr != nil {
- return mountStorageErr
+		if createErr != nil {
+			logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+		}
+ createErr = mountStorageErr
+ }
+
+ // Only trigger storage cleanup if mountStorage was successful.
+ // Otherwise, we may mess up mount counters.
+ if createNetNSErr != nil && mountStorageErr == nil {
+ if err := c.cleanupStorage(); err != nil {
+ // createErr is guaranteed non-nil, so print
+ // unconditionally
+ logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+ createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID())
+ }
+ }
+
+ // It's OK to unconditionally trigger network cleanup. If the network
+ // isn't ready it will do nothing.
+ if createErr != nil {
+ if err := c.cleanupNetwork(); err != nil {
+ logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+ createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID())
+ }
+ }
+
+ if createErr != nil {
+ return createErr
}
- // Save the container
+ // Save changes to container state
return c.save()
}
diff --git a/libpod/define/config.go b/libpod/define/config.go
index d8d6ccf55..c66f67feb 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -18,3 +18,7 @@ type InfoData struct {
Type string
Data map[string]interface{}
}
+
+// VolumeDriverLocal is the "local" volume driver. It is managed by libpod
+// itself.
+const VolumeDriverLocal = "local"
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index a9b735327..280ae5f5c 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -507,6 +507,36 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error {
return nil
}
+// UpdateVolume updates a volume from the database.
+// For the in-memory state, this is a no-op.
+func (s *InMemoryState) UpdateVolume(volume *Volume) error {
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name())
+ }
+
+ return nil
+}
+
+// SaveVolume saves a volume's state to the database.
+// For the in-memory state, this is a no-op.
+func (s *InMemoryState) SaveVolume(volume *Volume) error {
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name())
+ }
+
+ return nil
+}
+
// VolumeInUse checks if the given volume is being used by at least one container
func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
if !volume.valid {
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 70296248c..9df93faf3 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -44,10 +44,23 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
volume.config.Name = stringid.GenerateNonCryptoID()
}
if volume.config.Driver == "" {
- volume.config.Driver = "local"
+ volume.config.Driver = define.VolumeDriverLocal
}
volume.config.CreatedTime = time.Now()
+ if volume.config.Driver == define.VolumeDriverLocal {
+ logrus.Debugf("Validating options for local driver")
+ // Validate options
+ for key := range volume.config.Options {
+ switch key {
+ case "device", "o", "type":
+ // Do nothing, valid keys
+ default:
+ return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key)
+ }
+ }
+ }
+
// Create the mountpoint of this volume
volPathRoot := filepath.Join(r.config.VolumePath, volume.config.Name)
if err := os.MkdirAll(volPathRoot, 0700); err != nil {
@@ -102,6 +115,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return define.ErrVolumeRemoved
}
+ // Update volume status to pick up a potential removal from state
+ if err := v.update(); err != nil {
+ return err
+ }
+
deps, err := r.state.VolumeInUse(v)
if err != nil {
return err
@@ -137,6 +155,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
}
}
+ // If the volume is still mounted - force unmount it
+ if err := v.unmount(true); err != nil {
+ return errors.Wrapf(err, "error unmounting volume %s", v.Name())
+ }
+
// Set volume as invalid so it can no longer be used
v.valid = false
diff --git a/libpod/state.go b/libpod/state.go
index 5d704e69a..db4667ad6 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -203,6 +203,10 @@ type State interface {
// RemoveVolume removes the specified volume.
// Only volumes that have no container dependencies can be removed
RemoveVolume(volume *Volume) error
+ // UpdateVolume updates the volume's state from the database.
+ UpdateVolume(volume *Volume) error
+ // SaveVolume saves a volume's state to the database.
+ SaveVolume(volume *Volume) error
// AllVolumes returns all the volumes available in the state
AllVolumes() ([]*Volume, error)
}
diff --git a/libpod/volume.go b/libpod/volume.go
index abfa7b3f4..b4de3aedc 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -6,17 +6,20 @@ import (
"github.com/containers/libpod/libpod/lock"
)
-// Volume is the type used to create named volumes
-// TODO: all volumes should be created using this and the Volume API
+// Volume is a libpod named volume.
+// Named volumes may be shared by multiple containers, and may be created using
+// more complex options than normal bind mounts. They may be backed by a mounted
+// filesystem on the host.
type Volume struct {
config *VolumeConfig
+ state *VolumeState
valid bool
runtime *Runtime
lock lock.Locker
}
-// VolumeConfig holds the volume's config information
+// VolumeConfig holds the volume's immutable configuration.
type VolumeConfig struct {
// Name of the volume.
Name string `json:"name"`
@@ -26,7 +29,7 @@ type VolumeConfig struct {
Labels map[string]string `json:"labels"`
// The volume driver. Empty string or local does not activate a volume
// driver, all other volumes will.
- Driver string `json:"driver"`
+ Driver string `json:"volumeDriver"`
// The location the volume is mounted at.
MountPoint string `json:"mountPoint"`
// Time the volume was created.
@@ -34,7 +37,7 @@ type VolumeConfig struct {
// Options to pass to the volume driver. For the local driver, this is
// a list of mount options. For other drivers, they are passed to the
// volume driver handling the volume.
- Options map[string]string `json:"options"`
+ Options map[string]string `json:"volumeOptions,omitempty"`
// Whether this volume was created for a specific container and will be
// removed with it.
IsCtrSpecific bool `json:"ctrSpecific"`
@@ -44,6 +47,18 @@ type VolumeConfig struct {
GID int `json:"gid"`
}
+// VolumeState holds the volume's mutable state.
+// Volumes are not guaranteed to have a state. Only volumes using the Local
+// driver that have mount options set will create a state.
+type VolumeState struct {
+ // MountCount is the number of times this volume has been requested to
+ // be mounted.
+ // It is incremented on mount() and decremented on unmount().
+ // On incrementing from 0, the volume will be mounted on the host.
+ // On decrementing to 0, the volume will be unmounted on the host.
+ MountCount uint `json:"mountCount"`
+}
+
// Name retrieves the volume's name
func (v *Volume) Name() string {
return v.config.Name
@@ -78,10 +93,9 @@ func (v *Volume) MountPoint() string {
// Options return the volume's options
func (v *Volume) Options() map[string]string {
options := make(map[string]string)
- for key, value := range v.config.Options {
- options[key] = value
+ for k, v := range v.config.Options {
+ options[k] = v
}
-
return options
}
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 35f0ca19d..2e886e1b0 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -3,6 +3,8 @@ package libpod
import (
"os"
"path/filepath"
+
+ "github.com/containers/libpod/libpod/define"
)
// Creates a new volume
@@ -20,3 +22,25 @@ func newVolume(runtime *Runtime) (*Volume, error) {
func (v *Volume) teardownStorage() error {
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}
+
+// Volumes with options set, or a filesystem type, or a device to mount need to
+// be mounted and unmounted.
+func (v *Volume) needsMount() bool {
+ return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal
+}
+
+// update() updates the volume state from the DB.
+func (v *Volume) update() error {
+ if err := v.runtime.state.UpdateVolume(v); err != nil {
+ return err
+ }
+ if !v.valid {
+ return define.ErrVolumeRemoved
+ }
+ return nil
+}
+
+// save() saves the volume state to the DB
+func (v *Volume) save() error {
+ return v.runtime.state.SaveVolume(v)
+}
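
needsMount() is what gates all of the new mount handling: only local-driver volumes created with mount options are ever mounted or unmounted on the host, while an options-less local volume stays a plain directory under the volume path. Illustrative values, inside package libpod (fmt assumed imported):

```
func exampleNeedsMount() {
	tmpfsVol := &Volume{config: &VolumeConfig{
		Driver:  define.VolumeDriverLocal,
		Options: map[string]string{"type": "tmpfs", "device": "tmpfs", "o": "nodev"},
	}}
	plainVol := &Volume{config: &VolumeConfig{Driver: define.VolumeDriverLocal}}

	fmt.Println(tmpfsVol.needsMount()) // true: mount()/unmount() manage a host mount
	fmt.Println(plainVol.needsMount()) // false: the directory is used as-is
}
```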
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
new file mode 100644
index 000000000..9ae4dcf69
--- /dev/null
+++ b/libpod/volume_internal_linux.go
@@ -0,0 +1,128 @@
+// +build linux
+
+package libpod
+
+import (
+ "io/ioutil"
+ "os/exec"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// mount mounts the volume if necessary.
+// A mount is necessary if a volume has any options set.
+// If a mount is necessary, v.state.MountCount will be incremented.
+// If it was 0 when the increment occurred, the volume will be mounted on the
+// host. Otherwise, we assume it is already mounted.
+// Must be done while the volume is locked.
+// It is a no-op on volumes that do not require a mount (as defined by
+// needsMount()).
+func (v *Volume) mount() error {
+ if !v.needsMount() {
+ return nil
+ }
+
+ // Update the volume from the DB to get an accurate mount counter.
+ if err := v.update(); err != nil {
+ return err
+ }
+
+ // If the count is non-zero, the volume is already mounted.
+ // Nothing to do.
+ if v.state.MountCount > 0 {
+ v.state.MountCount = v.state.MountCount + 1
+ logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount)
+ return v.save()
+ }
+
+ volDevice := v.config.Options["device"]
+ volType := v.config.Options["type"]
+ volOptions := v.config.Options["o"]
+
+ // Some filesystems (tmpfs) don't have a device, but we still need to
+ // give the kernel something.
+ if volDevice == "" && volType != "" {
+ volDevice = volType
+ }
+
+ // We need to use the actual mount command.
+ // Convincing unix.Mount to use the same semantics as the mount command
+ // itself seems prohibitively difficult.
+ // TODO: might want to cache this path in the runtime?
+ mountPath, err := exec.LookPath("mount")
+ if err != nil {
+ return errors.Wrapf(err, "error locating 'mount' binary")
+ }
+ mountArgs := []string{}
+ if volOptions != "" {
+ mountArgs = append(mountArgs, "-o", volOptions)
+ }
+ if volType != "" {
+ mountArgs = append(mountArgs, "-t", volType)
+ }
+ mountArgs = append(mountArgs, volDevice, v.config.MountPoint)
+ mountCmd := exec.Command(mountPath, mountArgs...)
+
+ errPipe, err := mountCmd.StderrPipe()
+ if err != nil {
+ return errors.Wrapf(err, "error getting stderr pipe for mount")
+ }
+	if err := mountCmd.Start(); err != nil {
+		return errors.Wrapf(err, "error starting mount command for volume %s", v.Name())
+	}
+	// Read all of stderr before waiting, then wait so that a non-zero exit
+	// from mount(8) is reported instead of being silently ignored.
+	out, err := ioutil.ReadAll(errPipe)
+	if err != nil {
+		return errors.Wrapf(err, "error reading mount STDERR")
+	}
+	if err := mountCmd.Wait(); err != nil {
+		return errors.Wrapf(errors.New(string(out)), "error mounting volume %s", v.Name())
+	}
+
+ logrus.Debugf("Mounted volume %s", v.Name())
+
+ // Increment the mount counter
+ v.state.MountCount = v.state.MountCount + 1
+ logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount)
+ return v.save()
+}
+
+// unmount unmounts the volume if necessary.
+// Unmounting a volume that is not mounted is a no-op.
+// Unmounting a volume that does not require a mount is a no-op.
+// The volume must be locked for this to occur.
+// The mount counter will be decremented if non-zero. If the counter reaches 0,
+// the volume will really be unmounted, as no further containers are using the
+// volume.
+// If force is set, the volume will be unmounted regardless of mount counter.
+func (v *Volume) unmount(force bool) error {
+ if !v.needsMount() {
+ return nil
+ }
+
+ // Update the volume from the DB to get an accurate mount counter.
+ if err := v.update(); err != nil {
+ return err
+ }
+
+ if v.state.MountCount == 0 {
+ logrus.Debugf("Volume %s already unmounted", v.Name())
+ return nil
+ }
+
+ if !force {
+ v.state.MountCount = v.state.MountCount - 1
+ } else {
+ v.state.MountCount = 0
+ }
+
+ logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount)
+
+ if v.state.MountCount == 0 {
+ // Unmount the volume
+ if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil {
+ return errors.Wrapf(err, "error unmounting volume %s", v.Name())
+ }
+ logrus.Debugf("Unmounted volume %s", v.Name())
+ }
+
+ return v.save()
+}
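
Taken together, the mount counter behaves like a reference count across containers sharing a volume. A hedged sketch of the lifecycle for a tmpfs-backed volume (locking elided; real callers hold vol.lock around each call, as the comments above require):

```
func exampleMountLifecycle(vol *Volume) error {
	if err := vol.mount(); err != nil { // MountCount 0 -> 1: mount(8) is invoked
		return err
	}
	if err := vol.mount(); err != nil { // MountCount 1 -> 2: already mounted, counter only
		return err
	}
	if err := vol.unmount(false); err != nil { // MountCount 2 -> 1: still in use, stays mounted
		return err
	}
	return vol.unmount(false) // MountCount 1 -> 0: unix.Unmount with MNT_DETACH
}
```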
diff --git a/libpod/volume_internal_unsupported.go b/libpod/volume_internal_unsupported.go
new file mode 100644
index 000000000..74e24cfda
--- /dev/null
+++ b/libpod/volume_internal_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package libpod
+
+import (
+ "github.com/containers/libpod/libpod/define"
+)
+
+func (v *Volume) mount() error {
+ return define.ErrNotImplemented
+}
+
+func (v *Volume) unmount(force bool) error {
+ return define.ErrNotImplemented
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 0537308f8..dd15e1d15 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -185,7 +185,7 @@ func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCrea
options = append(options, libpod.WithVolumeLabels(labels))
}
- if len(options) != 0 {
+ if len(opts) != 0 {
options = append(options, libpod.WithVolumeOptions(opts))
}
newVolume, err := r.NewVolume(ctx, options...)
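
The adapter change above feeds user-supplied --opt values through WithVolumeOptions, so they hit the same validation newVolume() now performs for the local driver. A sketch of driving this directly through the libpod API, assuming the usual imports of context and github.com/containers/libpod/libpod; WithVolumeName is assumed to exist alongside WithVolumeOptions, mirroring how the CLI names the volume:

```
func createTmpfsVolume(ctx context.Context, runtime *libpod.Runtime) (*libpod.Volume, error) {
	opts := map[string]string{
		"type":   "tmpfs",
		"device": "tmpfs",
		"o":      "nodev,noexec",
	}
	// Only the keys "device", "o", and "type" pass validation for the
	// local driver; anything else fails with define.ErrInvalidArg.
	return runtime.NewVolume(ctx,
		libpod.WithVolumeName("myvol"), // assumed option, as used by pkg/adapter
		libpod.WithVolumeOptions(opts),
	)
}
```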
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 5bad6744b..551e86b93 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -5,11 +5,14 @@ package integration
import (
"fmt"
"os"
+ "os/exec"
"path/filepath"
+ "strings"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
)
var _ = Describe("Podman run with volumes", func() {
@@ -190,4 +193,60 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
})
+
+ It("podman run with tmpfs named volume mounts and unmounts", func() {
+ SkipIfRootless()
+ volName := "testvol"
+ mkVolume := podmanTest.Podman([]string{"volume", "create", "--opt", "type=tmpfs", "--opt", "device=tmpfs", "--opt", "o=nodev", "testvol"})
+ mkVolume.WaitWithDefaultTimeout()
+ Expect(mkVolume.ExitCode()).To(Equal(0))
+
+ // Volume not mounted on create
+ mountCmd1, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter)
+ Expect(err).To(BeNil())
+ mountCmd1.Wait(90)
+ Expect(mountCmd1.ExitCode()).To(Equal(0))
+ os.Stdout.Sync()
+ os.Stderr.Sync()
+ mountOut1 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd1.Out.Contents())), " ")
+ fmt.Printf("Output: %s", mountOut1)
+ Expect(strings.Contains(mountOut1, volName)).To(BeFalse())
+
+ ctrName := "testctr"
+ podmanSession := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, "-v", fmt.Sprintf("%s:/testvol", volName), ALPINE, "top"})
+ podmanSession.WaitWithDefaultTimeout()
+ Expect(podmanSession.ExitCode()).To(Equal(0))
+
+ // Volume now mounted as container is running
+ mountCmd2, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter)
+ Expect(err).To(BeNil())
+ mountCmd2.Wait(90)
+ Expect(mountCmd2.ExitCode()).To(Equal(0))
+ os.Stdout.Sync()
+ os.Stderr.Sync()
+ mountOut2 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd2.Out.Contents())), " ")
+ fmt.Printf("Output: %s", mountOut2)
+ Expect(strings.Contains(mountOut2, volName)).To(BeTrue())
+
+ // Stop the container to unmount
+ podmanStopSession := podmanTest.Podman([]string{"stop", "--timeout", "0", ctrName})
+ podmanStopSession.WaitWithDefaultTimeout()
+ Expect(podmanStopSession.ExitCode()).To(Equal(0))
+
+ // We have to force cleanup so the unmount happens
+ podmanCleanupSession := podmanTest.Podman([]string{"container", "cleanup", ctrName})
+ podmanCleanupSession.WaitWithDefaultTimeout()
+ Expect(podmanCleanupSession.ExitCode()).To(Equal(0))
+
+ // Ensure volume is unmounted
+ mountCmd3, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter)
+ Expect(err).To(BeNil())
+ mountCmd3.Wait(90)
+ Expect(mountCmd3.ExitCode()).To(Equal(0))
+ os.Stdout.Sync()
+ os.Stderr.Sync()
+ mountOut3 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd3.Out.Contents())), " ")
+ fmt.Printf("Output: %s", mountOut3)
+ Expect(strings.Contains(mountOut3, volName)).To(BeFalse())
+ })
})
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
index 041a9e6f0..77e8abbd4 100644
--- a/test/e2e/volume_create_test.go
+++ b/test/e2e/volume_create_test.go
@@ -57,4 +57,10 @@ var _ = Describe("Podman volume create", func() {
Expect(match).To(BeTrue())
Expect(len(check.OutputToStringArray())).To(Equal(1))
})
+
+ It("podman create volume with bad volume option", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "--opt", "badOpt=bad"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
})