From c8193633cd82228b01e789becead410c2a940227 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Thu, 29 Aug 2019 16:14:18 -0400 Subject: Change volume driver and options JSON tags In upcoming commits, we're going to turn on the backends for these fields. Volumes with these set will act fundamentally differently from other volumes. There will probably be validation required for each field. Until now, though, we've freely allowed creation of volumes with these set - they just did nothing. So we have no idea what could be in the DB with old volumes. Change the struct tags so we don't have to worry about old, unvalidated data. We'll start fresh with new volumes. Signed-off-by: Matthew Heon --- libpod/volume.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'libpod') diff --git a/libpod/volume.go b/libpod/volume.go index abfa7b3f4..e6e92c3ac 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -26,7 +26,7 @@ type VolumeConfig struct { Labels map[string]string `json:"labels"` // The volume driver. Empty string or local does not activate a volume // driver, all other volumes will. - Driver string `json:"driver"` + Driver string `json:"volumeDriver"` // The location the volume is mounted at. MountPoint string `json:"mountPoint"` // Time the volume was created. @@ -34,7 +34,7 @@ type VolumeConfig struct { // Options to pass to the volume driver. For the local driver, this is // a list of mount options. For other drivers, they are passed to the // volume driver handling the volume. - Options map[string]string `json:"options"` + Options map[string]string `json:"volumeOptions"` // Whether this volume was created for a specific container and will be // removed with it. IsCtrSpecific bool `json:"ctrSpecific"` -- cgit v1.2.3-54-g00ecf From 5a8a71ed817a4fa50fd9444846a50b76f25228d1 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Fri, 30 Aug 2019 16:09:17 -0400 Subject: Add volume state We need to be able to track the number of times a volume has been mounted for tmpfs/nfs/etc volumes. As such, we need a mutable state for volumes. Add one, with the expected update/save methods in both states. There is backwards compat here, in that older volumes without a state will still be accepted. 
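For illustration only, not part of this patch: inside package libpod, a caller is expected to take the volume lock, refresh the mutable state from the database with the new UpdateVolume, modify it, and write it back with SaveVolume. A minimal sketch of that round-trip, assuming package-libpod context and using only the fields and methods added in this commit (the helper name is hypothetical):

func incrementMountCount(s State, v *Volume) error {
	// The volume lock serializes concurrent read-modify-write cycles
	// on the volume's state.
	v.lock.Lock()
	defer v.lock.Unlock()

	// Refresh v.state from the DB. Volumes created before this change
	// may have no stored state; treat that as an empty state.
	if err := s.UpdateVolume(v); err != nil {
		return err
	}
	if v.state == nil {
		v.state = new(VolumeState)
	}

	v.state.MountCount++

	// Persist the updated state.
	return s.SaveVolume(v)
}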
Signed-off-by: Matthew Heon --- libpod/boltdb_state.go | 114 ++++++++++++++++++++++++++++++++++++++++ libpod/boltdb_state_internal.go | 8 +++ libpod/in_memory_state.go | 30 +++++++++++ libpod/state.go | 4 ++ libpod/volume.go | 31 +++++++++-- 5 files changed, 183 insertions(+), 4 deletions(-) (limited to 'libpod') diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 1de8d80c9..ff5e62ce2 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -1352,6 +1352,16 @@ func (s *BoltState) AddVolume(volume *Volume) error { return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name()) } + // Volume state is allowed to not exist + var volStateJSON []byte + if volume.state != nil { + stateJSON, err := json.Marshal(volume.state) + if err != nil { + return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name()) + } + volStateJSON = stateJSON + } + db, err := s.getDBCon() if err != nil { return err @@ -1392,6 +1402,12 @@ func (s *BoltState) AddVolume(volume *Volume) error { return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name()) } + if volStateJSON != nil { + if err := newVol.Put(stateKey, volStateJSON); err != nil { + return errors.Wrapf(err, "error storing volume %s state in DB", volume.Name()) + } + } + if err := allVolsBkt.Put(volName, volName); err != nil { return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name()) } @@ -1483,6 +1499,103 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { return err } +// UpdateVolume updates the volume's state from the database. +func (s *BoltState) UpdateVolume(volume *Volume) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + newState := new(VolumeState) + volumeName := []byte(volume.Name()) + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.View(func(tx *bolt.Tx) error { + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + + volToUpdate := volBucket.Bucket(volumeName) + if volToUpdate == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name()) + } + + stateBytes := volToUpdate.Get(stateKey) + if stateBytes == nil { + // Having no state is valid. + // Return nil, use the empty state. + return nil + } + + if err := json.Unmarshal(stateBytes, newState); err != nil { + return errors.Wrapf(err, "error unmarshalling volume %s state", volume.Name()) + } + + return nil + }) + if err != nil { + return err + } + + volume.state = newState + + return nil +} + +// SaveVolume saves the volume's state to the database. 
+func (s *BoltState) SaveVolume(volume *Volume) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + volumeName := []byte(volume.Name()) + + var newStateJSON []byte + if volume.state != nil { + stateJSON, err := json.Marshal(volume.state) + if err != nil { + return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name()) + } + newStateJSON = stateJSON + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + + volToUpdate := volBucket.Bucket(volumeName) + if volToUpdate == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name()) + } + + return volToUpdate.Put(stateKey, newStateJSON) + }) + return err +} + // AllVolumes returns all volumes present in the state func (s *BoltState) AllVolumes() ([]*Volume, error) { if !s.valid { @@ -1551,6 +1664,7 @@ func (s *BoltState) Volume(name string) (*Volume, error) { volume := new(Volume) volume.config = new(VolumeConfig) + volume.state = new(VolumeState) db, err := s.getDBCon() if err != nil { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 6e4179835..8dc3d1309 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -449,6 +449,14 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name)) } + // Volume state is allowed to be nil for legacy compatability + volStateBytes := volDB.Get(stateKey) + if volStateBytes != nil { + if err := json.Unmarshal(volStateBytes, volume.state); err != nil { + return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name)) + } + } + // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID) if err != nil { diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index a9b735327..280ae5f5c 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -507,6 +507,36 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error { return nil } +// UpdateVolume updates a volume from the database. +// For the in-memory state, this is a no-op. +func (s *InMemoryState) UpdateVolume(volume *Volume) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + if _, ok := s.volumes[volume.Name()]; !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + return nil +} + +// SaveVolume saves a volume's state to the database. +// For the in-memory state, this is a no-op. +func (s *InMemoryState) SaveVolume(volume *Volume) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + if _, ok := s.volumes[volume.Name()]; !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + return nil +} + // VolumeInUse checks if the given volume is being used by at least one container func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) { if !volume.valid { diff --git a/libpod/state.go b/libpod/state.go index 5d704e69a..db4667ad6 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -203,6 +203,10 @@ type State interface { // RemoveVolume removes the specified volume. 
// Only volumes that have no container dependencies can be removed RemoveVolume(volume *Volume) error + // UpdateVolume updates the volume's state from the database. + UpdateVolume(volume *Volume) error + // SaveVolume saves a volume's state to the database. + SaveVolume(volume *Volume) error + // AllVolumes returns all the volumes available in the state AllVolumes() ([]*Volume, error) } diff --git a/libpod/volume.go b/libpod/volume.go index e6e92c3ac..5970867c3 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -6,17 +6,20 @@ import ( "github.com/containers/libpod/libpod/lock" ) -// Volume is the type used to create named volumes -// TODO: all volumes should be created using this and the Volume API +// Volume is a libpod named volume. +// Named volumes may be shared by multiple containers, and may be created using +// more complex options than normal bind mounts. They may be backed by a mounted +// filesystem on the host. type Volume struct { config *VolumeConfig + state *VolumeState valid bool runtime *Runtime lock lock.Locker } -// VolumeConfig holds the volume's config information +// VolumeConfig holds the volume's immutable configuration. type VolumeConfig struct { // Name of the volume. Name string `json:"name"` @@ -34,7 +37,15 @@ type VolumeConfig struct { // Options to pass to the volume driver. For the local driver, this is // a list of mount options. For other drivers, they are passed to the // volume driver handling the volume. - Options map[string]string `json:"volumeOptions"` + Options map[string]string `json:"volumeOptions,omitempty"` + // Type is the type of the volume. This is only used with the local + // driver. It the the filesystem that we will attempt to mount - nfs, + // tmpfs, etc. + Type string `json:"type,omitempty"` + // Device is the device of the volume. This is only used with the local + // driver, and only with some filesystem types (e.g., not required by + // tmpfs). It is the device to mount. + Device string `json:"device,omitempty"` // Whether this volume was created for a specific container and will be // removed with it. IsCtrSpecific bool `json:"ctrSpecific"` @@ -44,6 +55,18 @@ type VolumeConfig struct { GID int `json:"gid"` } +// VolumeState holds the volume's mutable state. +// Volumes are not guaranteed to have a state. Only volumes using the Local +// driver that have mount options set will create a state. +type VolumeState struct { + // MountCount is the number of times this volume has been requested to + // be mounted. + // It is incremented on mount() and decremented on unmount(). + // On incrementing from 0, the volume will be mounted on the host. + // On decrementing to 0, the volume will be unmounted on the host. + MountCount uint `json:"mountCount"` +} + // Name retrieves the volume's name func (v *Volume) Name() string { return v.config.Name -- cgit v1.2.3-54-g00ecf From a760e325f3180638f9fedd0ee79d4c6695d8ba64 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Tue, 3 Sep 2019 15:03:44 -0400 Subject: Add ability for volumes with options to mount/umount When volume options and the local volume driver are specified, the volume is intended to be mounted using the 'mount' command. Supported options will be used to mount the volume before the first container using it starts, and unmount the volume after the last container using it dies. This should work for any local filesystem, though at present I've only tested with tmpfs and btrfs.
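As an illustrative aside, not part of the patch: the --opt keys accepted for the local driver ("type", "device", "o") end up as arguments to the mount binary, in the order built by the new volume_internal_linux.go later in this patch. A standalone sketch of that mapping, with an assumed volume name and mount-point path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Options as recorded by:
	//   podman volume create --opt type=tmpfs --opt device=tmpfs --opt o=nodev myvol
	opts := map[string]string{"type": "tmpfs", "device": "tmpfs", "o": "nodev"}

	// Assumed mount point; the real value comes from the volume's config.
	mountPoint := "/var/lib/containers/storage/volumes/myvol/_data"

	args := []string{}
	if o := opts["o"]; o != "" {
		args = append(args, "-o", o)
	}
	if t := opts["type"]; t != "" {
		args = append(args, "-t", t)
	}
	args = append(args, opts["device"], mountPoint)

	// Prints: mount -o nodev -t tmpfs tmpfs /var/lib/containers/storage/volumes/myvol/_data
	fmt.Println("mount " + strings.Join(args, " "))
}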
Signed-off-by: Matthew Heon --- docs/podman-volume-create.1.md | 2 + libpod/boltdb_state.go | 49 +++++++++++++ libpod/boltdb_state_internal.go | 2 +- libpod/container_internal.go | 73 ++++++++++++++++++- libpod/container_internal_linux.go | 49 ++++++++----- libpod/runtime_volume_linux.go | 23 ++++++ libpod/volume.go | 13 +--- libpod/volume_internal.go | 24 +++++++ libpod/volume_internal_linux.go | 128 ++++++++++++++++++++++++++++++++++ libpod/volume_internal_unsupported.go | 15 ++++ pkg/adapter/runtime.go | 2 +- pkg/varlinkapi/volumes.go | 3 +- test/e2e/run_volume_test.go | 59 ++++++++++++++++ test/e2e/volume_create_test.go | 6 ++ 14 files changed, 412 insertions(+), 36 deletions(-) create mode 100644 libpod/volume_internal_linux.go create mode 100644 libpod/volume_internal_unsupported.go (limited to 'libpod') diff --git a/docs/podman-volume-create.1.md b/docs/podman-volume-create.1.md index 3d3eb68b8..6612b0ad2 100644 --- a/docs/podman-volume-create.1.md +++ b/docs/podman-volume-create.1.md @@ -39,6 +39,8 @@ $ podman volume create myvol $ podman volume create $ podman volume create --label foo=bar myvol + +$ podman volume create --opt device=tmpfs --opt type=tmpfs --opt o=nodev,noexec myvol ``` ## SEE ALSO diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index ff5e62ce2..acba61f2a 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -159,6 +159,16 @@ func (s *BoltState) Refresh() error { return err } + allVolsBucket, err := getAllVolsBucket(tx) + if err != nil { + return err + } + + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + // Iterate through all IDs. Check if they are containers. // If they are, unmarshal their state, and then clear // PID, mountpoint, and state for all of them @@ -235,6 +245,44 @@ func (s *BoltState) Refresh() error { return nil }) + if err != nil { + return err + } + + // Now refresh volumes + err = allVolsBucket.ForEach(func(id, name []byte) error { + dbVol := volBucket.Bucket(id) + if dbVol == nil { + return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id)) + } + + // Get the state + volStateBytes := dbVol.Get(stateKey) + if volStateBytes == nil { + // If the volume doesn't have a state, nothing to do + return nil + } + + oldState := new(VolumeState) + + if err := json.Unmarshal(volStateBytes, oldState); err != nil { + return errors.Wrapf(err, "error unmarshalling state for volume %s", string(id)) + } + + // Reset mount count to 0 + oldState.MountCount = 0 + + newState, err := json.Marshal(oldState) + if err != nil { + return errors.Wrapf(err, "error marshalling state for volume %s", string(id)) + } + + if err := dbVol.Put(stateKey, newState); err != nil { + return errors.Wrapf(err, "error storing new state for volume %s", string(id)) + } + + return nil + }) return err }) return err @@ -1630,6 +1678,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) { volume := new(Volume) volume.config = new(VolumeConfig) + volume.state = new(VolumeState) if err := s.getVolumeFromDB(id, volume, volBucket); err != nil { if errors.Cause(err) != define.ErrNSMismatch { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 8dc3d1309..a50fce31e 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -449,7 +449,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name)) } - // 
Volume state is allowed to be nil for legacy compatability + // Volume state is allowed to be nil for legacy compatibility volStateBytes := volDB.Get(stateKey) if volStateBytes != nil { if err := json.Unmarshal(volStateBytes, volume.state); err != nil { diff --git a/libpod/container_internal.go b/libpod/container_internal.go index f51b53e85..67503f148 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1205,7 +1205,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e // TODO: Add ability to override mount label so we can use this for Mount() too // TODO: Can we use this for export? Copying SHM into the export might not be // good -func (c *Container) mountStorage() (string, error) { +func (c *Container) mountStorage() (_ string, Err error) { var err error // Container already mounted, nothing to do if c.state.Mounted { @@ -1225,6 +1225,40 @@ func (c *Container) mountStorage() (string, error) { if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir) } + defer func() { + if Err != nil { + if err := c.unmountSHM(c.config.ShmDir); err != nil { + logrus.Errorf("Error unmounting SHM for container %s after mount error: %v", c.ID(), err) + } + } + }() + } + + // Request a mount of all named volumes + for _, v := range c.config.NamedVolumes { + vol, err := c.runtime.state.Volume(v.Name) + if err != nil { + return "", errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID()) + } + + if vol.needsMount() { + vol.lock.Lock() + if err := vol.mount(); err != nil { + vol.lock.Unlock() + return "", errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID()) + } + vol.lock.Unlock() + defer func() { + if Err == nil { + return + } + vol.lock.Lock() + defer vol.lock.Unlock() + if err := vol.unmount(false); err != nil { + logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err) + } + }() + } } // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts @@ -1270,13 +1304,46 @@ func (c *Container) cleanupStorage() error { return err } + var cleanupErr error + + // Request an unmount of all named volumes + for _, v := range c.config.NamedVolumes { + vol, err := c.runtime.state.Volume(v.Name) + if err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID()) + + // We need to try and unmount every volume, so continue + // if they fail. 
+ continue + } + + if vol.needsMount() { + vol.lock.Lock() + if err := vol.unmount(false); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID()) + } + vol.lock.Unlock() + } + } + c.state.Mountpoint = "" c.state.Mounted = false if c.valid { - return c.save() + if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = err + } } - return nil + return cleanupErr } // Unmount the a container and free its resources diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index cb19b5484..33c57729b 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -61,7 +61,7 @@ func (c *Container) unmountSHM(mount string) error { // prepare mounts the container and sets up other required resources like net // namespaces -func (c *Container) prepare() (err error) { +func (c *Container) prepare() (Err error) { var ( wg sync.WaitGroup netNS ns.NetNS @@ -108,31 +108,42 @@ func (c *Container) prepare() (err error) { logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint) }() - defer func() { - if err != nil { - if err2 := c.cleanupNetwork(); err2 != nil { - logrus.Errorf("Error cleaning up container %s network: %v", c.ID(), err2) - } - if err2 := c.cleanupStorage(); err2 != nil { - logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), err2) - } - } - }() - wg.Wait() + var createErr error if createNetNSErr != nil { - if mountStorageErr != nil { - logrus.Error(createNetNSErr) - return mountStorageErr - } - return createNetNSErr + createErr = createNetNSErr } if mountStorageErr != nil { - return mountStorageErr + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + createErr = mountStorageErr + } + + // Only trigger storage cleanup if mountStorage was successful. + // Otherwise, we may mess up mount counters. + if createNetNSErr != nil && mountStorageErr == nil { + if err := c.cleanupStorage(); err != nil { + // createErr is guaranteed non-nil, so print + // unconditionally + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID()) + } + } + + // It's OK to unconditionally trigger network cleanup. If the network + // isn't ready it will do nothing. 
+ if createErr != nil { + if err := c.cleanupNetwork(); err != nil { + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID()) + } + } + + if createErr != nil { + return createErr } - // Save the container + // Save changes to container state return c.save() } diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index 70296248c..cf8055c7b 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -48,6 +48,19 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } volume.config.CreatedTime = time.Now() + if volume.config.Driver == "local" { + logrus.Debugf("Validating options for local driver") + // Validate options + for key := range volume.config.Options { + switch key { + case "device", "o", "type": + // Do nothing, valid keys + default: + return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key) + } + } + } + // Create the mountpoint of this volume volPathRoot := filepath.Join(r.config.VolumePath, volume.config.Name) if err := os.MkdirAll(volPathRoot, 0700); err != nil { @@ -102,6 +115,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return define.ErrVolumeRemoved } + // Update volume status to pick up a potential removal from state + if err := v.update(); err != nil { + return err + } + deps, err := r.state.VolumeInUse(v) if err != nil { return err @@ -137,6 +155,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error } } + // If the volume is still mounted - force unmount it + if err := v.unmount(true); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + } + // Set volume as invalid so it can no longer be used v.valid = false diff --git a/libpod/volume.go b/libpod/volume.go index 5970867c3..b4de3aedc 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -38,14 +38,6 @@ type VolumeConfig struct { // a list of mount options. For other drivers, they are passed to the // volume driver handling the volume. Options map[string]string `json:"volumeOptions,omitempty"` - // Type is the type of the volume. This is only used with the local - // driver. It the the filesystem that we will attempt to mount - nfs, - // tmpfs, etc. - Type string `json:"type,omitempty"` - // Device is the device of the volume. This is only used with the local - // driver, and only with some filesystem types (e.g., not required by - // tmpfs). It is the device to mount. - Device string `json:"device,omitempty"` // Whether this volume was created for a specific container and will be // removed with it. 
IsCtrSpecific bool `json:"ctrSpecific"` @@ -101,10 +93,9 @@ func (v *Volume) MountPoint() string { // Options return the volume's options func (v *Volume) Options() map[string]string { options := make(map[string]string) - for key, value := range v.config.Options { - options[key] = value + for k, v := range v.config.Options { + options[k] = v } - return options } diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 35f0ca19d..8c11b907f 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -3,6 +3,8 @@ package libpod import ( "os" "path/filepath" + + "github.com/containers/libpod/libpod/define" ) // Creates a new volume @@ -20,3 +22,25 @@ func newVolume(runtime *Runtime) (*Volume, error) { func (v *Volume) teardownStorage() error { return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name())) } + +// Volumes with options set, or a filesystem type, or a device to mount need to +// be mounted and unmounted. +func (v *Volume) needsMount() bool { + return len(v.config.Options) > 0 && v.config.Driver == "local" +} + +// update() updates the volume state from the DB. +func (v *Volume) update() error { + if err := v.runtime.state.UpdateVolume(v); err != nil { + return err + } + if !v.valid { + return define.ErrVolumeRemoved + } + return nil +} + +// save() saves the volume state to the DB +func (v *Volume) save() error { + return v.runtime.state.SaveVolume(v) +} diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go new file mode 100644 index 000000000..68ddb3c0d --- /dev/null +++ b/libpod/volume_internal_linux.go @@ -0,0 +1,128 @@ +// +build linux + +package libpod + +import ( + "io/ioutil" + "os/exec" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// mount mounts the volume if necessary. +// A mount is necessary if a volume has any options set. +// If a mount is necessary, v.state.MountCount will be incremented. +// If it was 0 when the increment occurred, the volume will be mounted on the +// host. Otherwise, we assume it is already mounted. +// Must be done while the volume is locked. +// Is a no-op on volumes that do not require a mount (as defined by +// volumeNeedsMount()) +func (v *Volume) mount() error { + if !v.needsMount() { + return nil + } + + // Update the volume from the DB to get an accurate mount counter. + if err := v.update(); err != nil { + return err + } + + // If the count is non-zero, the volume is already mounted. + // Nothing to do. + if v.state.MountCount > 0 { + v.state.MountCount = v.state.MountCount + 1 + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + return v.save() + } + + volDevice := v.config.Options["device"] + volType := v.config.Options["type"] + volOptions := v.config.Options["o"] + + // Some filesystems (tmpfs) don't have a device, but we still need to + // give the kernel something. + if volDevice == "" && volType != "" { + volDevice = volType + } + + // We need to use the actual mount command. + // Convincing unix.Mount to use the same semantics as the mount command + // itself seems prohibitively difficult. + // TODO: might want to cache this path in the runtime? 
+ mountPath, err := exec.LookPath("mount") + if err != nil { + return errors.Wrapf(err, "error locating 'mount' binary") + } + mountArgs := []string{} + if volOptions != "" { + mountArgs = append(mountArgs, "-o", volOptions) + } + if volType != "" { + mountArgs = append(mountArgs, "-t", volType) + } + mountArgs = append(mountArgs, volDevice, v.config.MountPoint) + mountCmd := exec.Command(mountPath, mountArgs...) + + errPipe, err := mountCmd.StderrPipe() + if err != nil { + return errors.Wrapf(err, "getting stderr pipe") + } + if err := mountCmd.Start(); err != nil { + out, err2 := ioutil.ReadAll(errPipe) + if err2 != nil { + return errors.Wrapf(err2, "error reading mount STDERR") + } + return errors.Wrapf(errors.New(string(out)), "error mounting volume %s", v.Name()) + } + + logrus.Debugf("Mounted volume %s", v.Name()) + + // Increment the mount counter + v.state.MountCount = v.state.MountCount + 1 + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + return v.save() +} + +// unmount unmounts the volume if necessary. +// Unmounting a volume that is not mounted is a no-op. +// Unmounting a volume that does not require a mount is a no-op. +// The volume must be locked for this to occur. +// The mount counter will be decremented if non-zero. If the counter reaches 0, +// the volume will really be unmounted, as no further containers are using the +// volume. +// If force is set, the volume will be unmounted regardless of mount counter. +func (v *Volume) unmount(force bool) error { + if !v.needsMount() { + return nil + } + + // Update the volume from the DB to get an accurate mount counter. + if err := v.update(); err != nil { + return err + } + + if v.state.MountCount == 0 { + logrus.Debugf("Volume %s already unmounted", v.Name()) + return nil + } + + if !force { + v.state.MountCount = v.state.MountCount - 1 + } else { + v.state.MountCount = 0 + } + + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + + if v.state.MountCount == 0 { + // Unmount the volume + if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + } + logrus.Debugf("Unmounted volume %s", v.Name()) + } + + return v.save() +} diff --git a/libpod/volume_internal_unsupported.go b/libpod/volume_internal_unsupported.go new file mode 100644 index 000000000..74e24cfda --- /dev/null +++ b/libpod/volume_internal_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package libpod + +import ( + "github.com/containers/libpod/libpod/define" +) + +func (v *Volume) mount() error { + return define.ErrNotImplemented +} + +func (v *Volume) unmount(force bool) error { + return define.ErrNotImplemented +} diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go index 0537308f8..dd15e1d15 100644 --- a/pkg/adapter/runtime.go +++ b/pkg/adapter/runtime.go @@ -185,7 +185,7 @@ func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCrea options = append(options, libpod.WithVolumeLabels(labels)) } - if len(options) != 0 { + if len(opts) != 0 { options = append(options, libpod.WithVolumeOptions(opts)) } newVolume, err := r.NewVolume(ctx, options...) 
diff --git a/pkg/varlinkapi/volumes.go b/pkg/varlinkapi/volumes.go index 6dd86d831..a27950b21 100644 --- a/pkg/varlinkapi/volumes.go +++ b/pkg/varlinkapi/volumes.go @@ -67,7 +67,8 @@ func (i *LibpodAPI) GetVolumes(call iopodman.VarlinkCall, args []string, all boo Labels: v.Labels(), MountPoint: v.MountPoint(), Name: v.Name(), - Options: v.Options(), + // TODO change types here to be correct + //Options: v.Options(), } volumes = append(volumes, newVol) } diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go index 5bad6744b..551e86b93 100644 --- a/test/e2e/run_volume_test.go +++ b/test/e2e/run_volume_test.go @@ -5,11 +5,14 @@ package integration import ( "fmt" "os" + "os/exec" "path/filepath" + "strings" . "github.com/containers/libpod/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" ) var _ = Describe("Podman run with volumes", func() { @@ -190,4 +193,60 @@ var _ = Describe("Podman run with volumes", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Not(Equal(0))) }) + + It("podman run with tmpfs named volume mounts and unmounts", func() { + SkipIfRootless() + volName := "testvol" + mkVolume := podmanTest.Podman([]string{"volume", "create", "--opt", "type=tmpfs", "--opt", "device=tmpfs", "--opt", "o=nodev", "testvol"}) + mkVolume.WaitWithDefaultTimeout() + Expect(mkVolume.ExitCode()).To(Equal(0)) + + // Volume not mounted on create + mountCmd1, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter) + Expect(err).To(BeNil()) + mountCmd1.Wait(90) + Expect(mountCmd1.ExitCode()).To(Equal(0)) + os.Stdout.Sync() + os.Stderr.Sync() + mountOut1 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd1.Out.Contents())), " ") + fmt.Printf("Output: %s", mountOut1) + Expect(strings.Contains(mountOut1, volName)).To(BeFalse()) + + ctrName := "testctr" + podmanSession := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, "-v", fmt.Sprintf("%s:/testvol", volName), ALPINE, "top"}) + podmanSession.WaitWithDefaultTimeout() + Expect(podmanSession.ExitCode()).To(Equal(0)) + + // Volume now mounted as container is running + mountCmd2, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter) + Expect(err).To(BeNil()) + mountCmd2.Wait(90) + Expect(mountCmd2.ExitCode()).To(Equal(0)) + os.Stdout.Sync() + os.Stderr.Sync() + mountOut2 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd2.Out.Contents())), " ") + fmt.Printf("Output: %s", mountOut2) + Expect(strings.Contains(mountOut2, volName)).To(BeTrue()) + + // Stop the container to unmount + podmanStopSession := podmanTest.Podman([]string{"stop", "--timeout", "0", ctrName}) + podmanStopSession.WaitWithDefaultTimeout() + Expect(podmanStopSession.ExitCode()).To(Equal(0)) + + // We have to force cleanup so the unmount happens + podmanCleanupSession := podmanTest.Podman([]string{"container", "cleanup", ctrName}) + podmanCleanupSession.WaitWithDefaultTimeout() + Expect(podmanCleanupSession.ExitCode()).To(Equal(0)) + + // Ensure volume is unmounted + mountCmd3, err := gexec.Start(exec.Command("mount"), GinkgoWriter, GinkgoWriter) + Expect(err).To(BeNil()) + mountCmd3.Wait(90) + Expect(mountCmd3.ExitCode()).To(Equal(0)) + os.Stdout.Sync() + os.Stderr.Sync() + mountOut3 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd3.Out.Contents())), " ") + fmt.Printf("Output: %s", mountOut3) + Expect(strings.Contains(mountOut3, volName)).To(BeFalse()) + }) }) diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go index 
041a9e6f0..77e8abbd4 100644 --- a/test/e2e/volume_create_test.go +++ b/test/e2e/volume_create_test.go @@ -57,4 +57,10 @@ var _ = Describe("Podman volume create", func() { Expect(match).To(BeTrue()) Expect(len(check.OutputToStringArray())).To(Equal(1)) }) + + It("podman create volume with bad volume option", func() { + session := podmanTest.Podman([]string{"volume", "create", "--opt", "badOpt=bad"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + }) }) -- cgit v1.2.3-54-g00ecf From de9a394fcff19ae4422a3f65502c8790787351fd Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Thu, 5 Sep 2019 10:00:50 -0400 Subject: Correctly report errors on unmounting SHM When we fail to remove a container's SHM, that's an error, and we need to report it as such. This may be part of our lingering storage woes. Also, remove MNT_DETACH. It may be another cause of the storage removal failures. Signed-off-by: Matthew Heon --- libpod/boltdb_state.go | 3 +-- libpod/container_internal.go | 21 +++++++++++++-------- libpod/container_internal_linux.go | 8 ++++---- libpod/define/config.go | 4 ++++ libpod/runtime_volume_linux.go | 4 ++-- libpod/volume_internal.go | 2 +- libpod/volume_internal_linux.go | 2 +- pkg/varlinkapi/volumes.go | 3 +-- 8 files changed, 27 insertions(+), 20 deletions(-) (limited to 'libpod') diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index acba61f2a..31b551b36 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -1403,11 +1403,10 @@ func (s *BoltState) AddVolume(volume *Volume) error { // Volume state is allowed to not exist var volStateJSON []byte if volume.state != nil { - stateJSON, err := json.Marshal(volume.state) + volStateJSON, err = json.Marshal(volume.state) if err != nil { return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name()) } - volStateJSON = stateJSON } db, err := s.getDBCon() diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 67503f148..ffc6c11ee 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1253,10 +1253,10 @@ func (c *Container) mountStorage() (_ string, Err error) { return } vol.lock.Lock() - defer vol.lock.Unlock() if err := vol.unmount(false); err != nil { logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err) } + vol.lock.Unlock() }() } } @@ -1281,14 +1281,19 @@ func (c *Container) cleanupStorage() error { return nil } + var cleanupErr error + for _, containerMount := range c.config.Mounts { if err := c.unmountSHM(containerMount); err != nil { - return err + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = err } } if c.config.Rootfs != "" { - return nil + return cleanupErr } if err := c.unmount(false); err != nil { @@ -1298,14 +1303,14 @@ func (c *Container) cleanupStorage() error { // state if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown { logrus.Errorf("Storage for container %s has been removed", c.ID()) - return nil + } else { + if cleanupErr != nil { + logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr) + } + cleanupErr = err } - - return err } - var cleanupErr error - // Request an unmount of all named volumes for _, v := range c.config.NamedVolumes { vol, err := c.runtime.state.Volume(v.Name) diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 33c57729b..a84db9aa5 100644 --- 
a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -49,12 +49,12 @@ func (c *Container) mountSHM(shmOptions string) error { } func (c *Container) unmountSHM(mount string) error { - if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil { + if err := unix.Unmount(mount, 0); err != nil { if err != syscall.EINVAL { - logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err) - } else { - logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err) + return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount) } + // If it's just an EINVAL, debug logs only + logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err) } return nil } diff --git a/libpod/define/config.go b/libpod/define/config.go index d8d6ccf55..c66f67feb 100644 --- a/libpod/define/config.go +++ b/libpod/define/config.go @@ -18,3 +18,7 @@ type InfoData struct { Type string Data map[string]interface{} } + +// VolumeDriverLocal is the "local" volume driver. It is managed by libpod +// itself. +const VolumeDriverLocal = "local" diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index cf8055c7b..9df93faf3 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -44,11 +44,11 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) volume.config.Name = stringid.GenerateNonCryptoID() } if volume.config.Driver == "" { - volume.config.Driver = "local" + volume.config.Driver = define.VolumeDriverLocal } volume.config.CreatedTime = time.Now() - if volume.config.Driver == "local" { + if volume.config.Driver == define.VolumeDriverLocal { logrus.Debugf("Validating options for local driver") // Validate options for key := range volume.config.Options { diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 8c11b907f..2e886e1b0 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -26,7 +26,7 @@ func (v *Volume) teardownStorage() error { // Volumes with options set, or a filesystem type, or a device to mount need to // be mounted and unmounted. func (v *Volume) needsMount() bool { - return len(v.config.Options) > 0 && v.config.Driver == "local" + return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal } // update() updates the volume state from the DB. diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go index 68ddb3c0d..9ae4dcf69 100644 --- a/libpod/volume_internal_linux.go +++ b/libpod/volume_internal_linux.go @@ -67,7 +67,7 @@ func (v *Volume) mount() error { errPipe, err := mountCmd.StderrPipe() if err != nil { - return errors.Wrapf(err, "getting stderr pipe") + return errors.Wrapf(err, "error getting stderr pipe for mount") } if err := mountCmd.Start(); err != nil { out, err2 := ioutil.ReadAll(errPipe) diff --git a/pkg/varlinkapi/volumes.go b/pkg/varlinkapi/volumes.go index a27950b21..6dd86d831 100644 --- a/pkg/varlinkapi/volumes.go +++ b/pkg/varlinkapi/volumes.go @@ -67,8 +67,7 @@ func (i *LibpodAPI) GetVolumes(call iopodman.VarlinkCall, args []string, all boo Labels: v.Labels(), MountPoint: v.MountPoint(), Name: v.Name(), - // TODO change types here to be correct - //Options: v.Options(), + Options: v.Options(), } volumes = append(volumes, newVol) } -- cgit v1.2.3-54-g00ecf
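A closing note on the error-handling shape used in cleanupStorage above (illustration only, not part of these patches): rather than returning on the first failure, each cleanup step runs, an earlier error is logged so it is not silently dropped, and the most recent error is returned. A self-contained sketch of that pattern, with hypothetical step names:

package main

import (
	"errors"
	"fmt"

	"github.com/sirupsen/logrus"
)

// cleanupAll attempts every step, logs earlier failures so they are not
// lost, and returns the last error seen (nil if everything succeeded).
func cleanupAll(steps ...func() error) error {
	var cleanupErr error
	for _, step := range steps {
		if err := step(); err != nil {
			if cleanupErr != nil {
				logrus.Errorf("Error during cleanup: %v", cleanupErr)
			}
			cleanupErr = err
		}
	}
	return cleanupErr
}

func main() {
	err := cleanupAll(
		func() error { return errors.New("unmount SHM failed") },
		func() error { return nil },
		func() error { return errors.New("unmount volume failed") },
	)
	fmt.Println("cleanup returned:", err) // cleanup returned: unmount volume failed
}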