Diffstat (limited to 'libpod')
 libpod/boltdb_state.go             |   1
 libpod/boltdb_state_internal.go    |  15
 libpod/container_api.go            |  10
 libpod/container_inspect.go        |   7
 libpod/container_internal.go       |  66
 libpod/container_internal_linux.go |  39
 libpod/define/containerstate.go    |   5
 libpod/define/errors.go            |   4
 libpod/define/volume_inspect.go    |  51
 libpod/options.go                  |  11
 libpod/plugin/volume_api.go        |  55
 libpod/rootless_cni_linux.go       |   2
 libpod/runtime.go                  |  16
 libpod/runtime_ctr.go              | 149
 libpod/runtime_volume_linux.go     | 130
 libpod/volume.go                   |  57
 libpod/volume_inspect.go           |  87
 libpod/volume_internal.go          |  13
 libpod/volume_internal_linux.go    |  46
 19 files changed, 630 insertions(+), 134 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index dcb2ff751..b2ee63b08 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -304,6 +304,7 @@ func (s *BoltState) Refresh() error {
// Reset mount count to 0
oldState.MountCount = 0
+ oldState.MountPoint = ""
newState, err := json.Marshal(oldState)
if err != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index c06fedd3e..6014fbef3 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -497,6 +497,21 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
}
}
+ // Retrieve volume driver
+ if volume.UsesVolumeDriver() {
+ plugin, err := s.runtime.getVolumePlugin(volume.config.Driver)
+ if err != nil {
+ // We want to fail gracefully here, to ensure that we
+ // can still remove volumes even if their plugin is
+ // missing. Otherwise, we end up with volumes that
+ // cannot even be retrieved from the database and will
+ // cause things like `volume ls` to fail.
+ logrus.Errorf("Volume %s uses volume plugin %s, but it cannot be accessed - some functionality may not be available: %v", volume.Name(), volume.config.Driver, err)
+ } else {
+ volume.plugin = plugin
+ }
+ }
+
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 87ff764e3..0d62a2dd7 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -210,7 +210,13 @@ func (c *Container) Kill(signal uint) error {
}
// TODO: Is killing a paused container OK?
- if c.state.State != define.ContainerStateRunning {
+ switch c.state.State {
+ case define.ContainerStateRunning, define.ContainerStateStopping:
+ // Note that killing containers in "stopping" state is okay.
+ // In that state, Podman is waiting for the runtime to
+ // stop the container, and if that is taking too long, a user
+ // may have decided to kill the container after all.
+ default:
return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
@@ -539,7 +545,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
- if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
+ if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 870d92ca9..ac7eae56b 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -212,7 +212,12 @@ func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, image
return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
}
mountStruct.Driver = volFromDB.Driver()
- mountStruct.Source = volFromDB.MountPoint()
+
+ mountPoint, err := volFromDB.MountPoint()
+ if err != nil {
+ return nil, err
+ }
+ mountStruct.Source = mountPoint
parseMountOptionsForInspect(volume.Options, &mountStruct)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f4cdb749f..b9ea50783 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -764,7 +764,7 @@ func (c *Container) isStopped() (bool, error) {
return true, err
}
- return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil
+ return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping), nil
}
// save container state to the database
@@ -1290,10 +1290,49 @@ func (c *Container) stop(timeout uint) error {
return err
}
+ // Set the container state to "stopping" and unlock the container
+ // before handing it over to conmon to unblock other commands. #8501
+ // demonstrates nicely that a high stop timeout will block even simple
+ // commands such as `podman ps` from progressing if the container lock
+ // is held when busy-waiting for the container to be stopped.
+ c.state.State = define.ContainerStateStopping
+ if err := c.save(); err != nil {
+ return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
+ }
+ if !c.batched {
+ c.lock.Unlock()
+ }
+
if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil {
return err
}
+ if !c.batched {
+ c.lock.Lock()
+ if err := c.syncContainer(); err != nil {
+ switch errors.Cause(err) {
+ // If the container has already been removed (e.g., via
+ // the cleanup process), there's nothing left to do.
+ case define.ErrNoSuchCtr, define.ErrCtrRemoved:
+ return nil
+ default:
+ return err
+ }
+ }
+ }
+
+ // Since we're now subject to a race condition with other processes
+ // that may have altered the state (and other data), let's check if the
+ // state has changed. If so, we should return immediately and log a
+ // warning.
+ if c.state.State != define.ContainerStateStopping {
+ logrus.Warnf(
+ "Container %q state changed from %q to %q while waiting for it to be stopped: discontinuing stop procedure as another process interfered",
+ c.ID(), define.ContainerStateStopping, c.state.State,
+ )
+ return nil
+ }
+
c.newContainerEvent(events.Stop)
c.state.PID = 0
@@ -1541,8 +1580,18 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
return nil, err
}
+ // HACK HACK HACK - copy up into a volume driver is 100% broken
+ // right now.
+ if vol.UsesVolumeDriver() {
+ logrus.Infof("Not copying up into volume %s as it uses a volume driver", vol.Name())
+ return vol, nil
+ }
+
// If the volume is not empty, we should not copy up.
- volMount := vol.MountPoint()
+ volMount, err := vol.MountPoint()
+ if err != nil {
+ return nil, err
+ }
contents, err := ioutil.ReadDir(volMount)
if err != nil {
return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID())
@@ -1580,7 +1629,11 @@ func (c *Container) chownVolume(volumeName string) error {
return err
}
- if vol.state.NeedsChown {
+ // TODO: For now, I've disabled chowning volumes owned by non-Podman
+ // drivers. This may be safe, but it's really going to be a case-by-case
+ // thing, I think - safest to leave disabled now and reenable later if
+ // there is a demand.
+ if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
vol.state.NeedsChown = false
uid := int(c.config.Spec.Process.User.UID)
@@ -1607,7 +1660,10 @@ func (c *Container) chownVolume(volumeName string) error {
return err
}
- mountPoint := vol.MountPoint()
+ mountPoint, err := vol.MountPoint()
+ if err != nil {
+ return err
+ }
if err := os.Lchown(mountPoint, uid, gid); err != nil {
return err
@@ -2116,7 +2172,7 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume
// Check for an exit file, and handle one if present
func (c *Container) checkExitFile() error {
// If the container's not running, nothing to do.
- if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+ if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping) {
return nil
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index ce2b52234..0553cc59c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -341,7 +341,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
}
- mountPoint := volume.MountPoint()
+ mountPoint, err := volume.MountPoint()
+ if err != nil {
+ return nil, err
+ }
volMount := spec.Mount{
Type: "bind",
Source: mountPoint,
@@ -903,7 +906,15 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
- input, err := archive.TarWithOptions(volume.MountPoint(), &archive.TarOptions{
+ mp, err := volume.MountPoint()
+ if err != nil {
+ return err
+ }
+ if mp == "" {
+ return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name())
+ }
+
+ input, err := archive.TarWithOptions(mp, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeSourceDir: true,
})
@@ -958,10 +969,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
func (c *Container) checkpointRestoreSupported() error {
if !criu.CheckForCriu() {
- return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion)
+ return errors.Errorf("checkpoint/restore requires at least CRIU %d", criu.MinCriuVersion)
}
if !c.ociRuntime.SupportsCheckpoint() {
- return errors.Errorf("Configured runtime does not support checkpoint/restore")
+ return errors.Errorf("configured runtime does not support checkpoint/restore")
}
return nil
}
@@ -993,7 +1004,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if c.AutoRemove() && options.TargetFile == "" {
- return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
+ return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
@@ -1079,13 +1090,13 @@ func (c *Container) importCheckpoint(input string) error {
}
err = archive.Untar(archiveFile, c.bundlePath(), options)
if err != nil {
- return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
if err = c.saveSpec(g.Config); err != nil {
- return errors.Wrap(err, "Saving imported container specification for restore failed")
+ return errors.Wrap(err, "saving imported container specification for restore failed")
}
return nil
@@ -1130,7 +1141,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
// no sense to try a restore. This is a minimal check if a checkpoint exist.
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
- return errors.Wrapf(err, "A complete checkpoint for this container cannot be found, cannot restore")
+ return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
@@ -1286,16 +1297,22 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
volumeFile, err := os.Open(volumeFilePath)
if err != nil {
- return errors.Wrapf(err, "Failed to open volume file %s", volumeFilePath)
+ return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
}
defer volumeFile.Close()
volume, err := c.runtime.GetVolume(v.Name)
if err != nil {
- return errors.Wrapf(err, "Failed to retrieve volume %s", v.Name)
+ return errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
}
- mountPoint := volume.MountPoint()
+ mountPoint, err := volume.MountPoint()
+ if err != nil {
+ return err
+ }
+ if mountPoint == "" {
+ return errors.Wrapf(define.ErrInternal, "unable to import volume %s as it is not mounted", volume.Name())
+ }
if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
}
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 825e77387..5d2bc9099 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -28,6 +28,9 @@ const (
// ContainerStateRemoving indicates the container is in the process of
// being removed.
ContainerStateRemoving ContainerStatus = iota
+ // ContainerStateStopping indicates the container is in the process of
+ // being stopped.
+ ContainerStateStopping ContainerStatus = iota
)
// ContainerStatus returns a string representation for users
@@ -50,6 +53,8 @@ func (t ContainerStatus) String() string {
return "exited"
case ContainerStateRemoving:
return "removing"
+ case ContainerStateStopping:
+ return "stopping"
}
return "bad state"
}
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 568f8e88d..d37bc397e 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -35,6 +35,10 @@ var (
// aliases.
ErrNoAliases = errors.New("no aliases for container")
+ // ErrMissingPlugin indicates that the requested operation requires a
+ // plugin that is not present on the system or in the configuration.
+ ErrMissingPlugin = errors.New("required plugin missing")
+
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go
new file mode 100644
index 000000000..20602ea16
--- /dev/null
+++ b/libpod/define/volume_inspect.go
@@ -0,0 +1,51 @@
+package define
+
+import (
+ "time"
+)
+
+// InspectVolumeData is the output of Inspect() on a volume. It is matched to
+// the format of 'docker volume inspect'.
+type InspectVolumeData struct {
+ // Name is the name of the volume.
+ Name string `json:"Name"`
+ // Driver is the driver used to create the volume.
+ // If set to "local" or "", the Local driver (Podman built-in code) is
+ // used to service the volume; otherwise, a volume plugin with the given
+ // name is used to mount and manage the volume.
+ Driver string `json:"Driver"`
+ // Mountpoint is the path on the host where the volume is mounted.
+ Mountpoint string `json:"Mountpoint"`
+ // CreatedAt is the date and time the volume was created at. This is not
+ // stored for older Libpod volumes; when absent, it is omitted.
+ CreatedAt time.Time `json:"CreatedAt,omitempty"`
+ // Status is used to return information on the volume's current state,
+ // if the volume was created using a volume plugin (uses a Driver that
+ // is not the local driver).
+ // Status is provided to us by an external program, so no guarantees are
+ // made about its format or contents. Further, it is an optional field,
+ // so it may not be set even in cases where a volume plugin is in use.
+ Status map[string]interface{} `json:"Status,omitempty"`
+ // Labels includes the volume's configured labels, key:value pairs that
+ // can be passed during volume creation to provide information for third
+ // party tools.
+ Labels map[string]string `json:"Labels"`
+ // Scope is unused and provided solely for Docker compatibility. It is
+ // unconditionally set to "local".
+ Scope string `json:"Scope"`
+ // Options is a set of options that were used when creating the volume.
+ // For the Local driver, these are mount options that will be used to
+ // determine how a local filesystem is mounted; they are handled as
+ // parameters to Mount in a manner described in the volume create
+ // manpage.
+ // For non-local drivers, these are passed as-is to the volume plugin.
+ Options map[string]string `json:"Options"`
+ // UID is the UID that the volume was created with.
+ UID int `json:"UID,omitempty"`
+ // GID is the GID that the volume was created with.
+ GID int `json:"GID,omitempty"`
+ // Anonymous indicates that the volume was created as an anonymous
+ // volume for a specific container, and will be removed when any
+ // container using it is removed.
+ Anonymous bool `json:"Anonymous,omitempty"`
+}
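For orientation, a plugin-backed volume serialized from this struct would look roughly like the following (all values are illustrative; Status comes straight from the plugin, so its shape is plugin-defined, and the omitempty fields drop out when unset):

{
    "Name": "myvol",
    "Driver": "example-plugin",
    "Mountpoint": "/var/lib/example-plugin/myvol",
    "CreatedAt": "2020-12-01T12:00:00Z",
    "Status": { "hello": "world" },
    "Labels": {},
    "Scope": "local",
    "Options": {}
}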
diff --git a/libpod/options.go b/libpod/options.go
index 31c0b9ac9..c7bac7e1f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1549,17 +1549,6 @@ func WithVolumeDriver(driver string) VolumeCreateOption {
return define.ErrVolumeFinalized
}
- // Uncomment when volume plugins are ready for use.
- // if driver != define.VolumeDriverLocal {
- // if _, err := plugin.GetVolumePlugin(driver); err != nil {
- // return err
- // }
- // }
-
- if driver != define.VolumeDriverLocal {
- return define.ErrNotImplemented
- }
-
volume.config.Driver = driver
return nil
}
diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index 2500a4f36..c5dec651c 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -2,8 +2,9 @@ package plugin
import (
"bytes"
- "fmt"
+ "context"
"io/ioutil"
+ "net"
"net/http"
"os"
"path/filepath"
@@ -43,7 +44,6 @@ var (
const (
defaultTimeout = 5 * time.Second
- defaultPath = "/run/docker/plugins"
volumePluginType = "VolumeDriver"
)
@@ -64,6 +64,8 @@ type VolumePlugin struct {
Name string
// SocketPath is the unix socket at which the plugin is accessed.
SocketPath string
+ // Client is the HTTP client we use to connect to the plugin.
+ Client *http.Client
}
// This is the response from the activate endpoint of the API.
@@ -76,7 +78,7 @@ type activateResponse struct {
func validatePlugin(newPlugin *VolumePlugin) error {
// It's a socket. Is it a plugin?
// Hit the Activate endpoint to find out if it is, and if so what kind
- req, err := http.NewRequest("POST", activatePath, nil)
+ req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
if err != nil {
return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name)
}
@@ -84,9 +86,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
req.Header.Set("Host", newPlugin.getURI())
req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1)
- client := new(http.Client)
- client.Timeout = defaultTimeout
- resp, err := client.Do(req)
+ resp, err := newPlugin.Client.Do(req)
if err != nil {
return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name)
}
@@ -121,22 +121,28 @@ func validatePlugin(newPlugin *VolumePlugin) error {
return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", "))
}
+ if plugins == nil {
+ plugins = make(map[string]*VolumePlugin)
+ }
+
plugins[newPlugin.Name] = newPlugin
return nil
}
-// GetVolumePlugin gets a single volume plugin by path.
-// TODO: We should not be auto-completing based on a default path; we should
-// require volumes to have been pre-specified in containers.conf (will need a
-// function to pre-populate the plugins list, and we should probably do a lazy
-// initialization there to not slow things down too much).
-func GetVolumePlugin(name string) (*VolumePlugin, error) {
+// GetVolumePlugin gets a single volume plugin, with the given name, at the
+// given path.
+func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
plugin, exists := plugins[name]
if exists {
+ // This shouldn't be possible, but just in case...
+ if plugin.SocketPath != filepath.Clean(path) {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath)
+ }
+
return plugin, nil
}
@@ -144,7 +150,20 @@ func GetVolumePlugin(name string) (*VolumePlugin, error) {
newPlugin := new(VolumePlugin)
newPlugin.Name = name
- newPlugin.SocketPath = filepath.Join(defaultPath, fmt.Sprintf("%s.sock", name))
+ newPlugin.SocketPath = filepath.Clean(path)
+
+ // Need an HTTP client to force a Unix connection.
+ // And since we can reuse it, might as well cache it.
+ client := new(http.Client)
+ client.Timeout = defaultTimeout
+ // This bit borrowed from pkg/bindings/connection.go
+ client.Transport = &http.Transport{
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return (&net.Dialer{}).DialContext(ctx, "unix", newPlugin.SocketPath)
+ },
+ DisableCompression: true,
+ }
+ newPlugin.Client = client
stat, err := os.Stat(newPlugin.SocketPath)
if err != nil {
@@ -183,6 +202,7 @@ func (p *VolumePlugin) verifyReachable() error {
}
// Send a request to the volume plugin for handling.
+// Callers *MUST* close the response when they are done.
func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) {
var (
reqJSON []byte
@@ -196,7 +216,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
}
}
- req, err := http.NewRequest("POST", endpoint, bytes.NewReader(reqJSON))
+ req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
if err != nil {
return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint)
}
@@ -204,13 +224,12 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
req.Header.Set("Host", p.getURI())
req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1)
- client := new(http.Client)
- client.Timeout = defaultTimeout
- resp, err := client.Do(req)
+ resp, err := p.Client.Do(req)
if err != nil {
return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint)
}
- defer resp.Body.Close()
+ // We are *deliberately not closing* the response here. It is the
+ // responsibility of the caller to do so after reading the response.
return resp, nil
}
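The recurring "http://plugin" prefix above works because the cached client's transport ignores the URL's host and always dials the plugin's Unix socket. A self-contained sketch of the same trick (socket path is an assumption for illustration; /Plugin.Activate is the activation endpoint used above):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// Illustrative socket path; real paths come from containers.conf.
	socketPath := "/run/docker/plugins/myplugin.sock"

	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Ignore the requested address and always dial the socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
			},
			DisableCompression: true,
		},
	}

	// "http://plugin" is never resolved; it only satisfies net/http's
	// requirement for a well-formed URL.
	resp, err := client.Post("http://plugin/Plugin.Activate", "application/json", nil)
	if err != nil {
		fmt.Println("plugin not reachable:", err)
		return
	}
	defer resp.Body.Close() // callers must close the response themselves
	fmt.Println("status:", resp.Status)
}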
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index ce8a87759..9a980750f 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -110,6 +110,8 @@ func DeallocRootlessCNI(ctx context.Context, c *Container) error {
logrus.Warn(err)
}
logrus.Debugf("rootless CNI: removing infra container %q", infra.ID())
+ infra.lock.Lock()
+ defer infra.lock.Unlock()
if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil {
return err
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1004e4fa7..34c737a67 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/libpod/lock"
+ "github.com/containers/podman/v2/libpod/plugin"
"github.com/containers/podman/v2/libpod/shutdown"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/registries"
@@ -888,3 +889,18 @@ func (r *Runtime) reloadStorageConf() error {
logrus.Infof("applied new storage configuration: %v", r.storageConfig)
return nil
}
+
+// getVolumePlugin gets a specific volume plugin given its name.
+func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
+ // There is no plugin for local.
+ if name == define.VolumeDriverLocal || name == "" {
+ return nil, nil
+ }
+
+ pluginPath, ok := r.config.Engine.VolumePlugins[name]
+ if !ok {
+ return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ }
+
+ return plugin.GetVolumePlugin(name, pluginPath)
+}
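getVolumePlugin resolves names through r.config.Engine.VolumePlugins, so plugins must now be declared up front rather than auto-discovered from /run/docker/plugins. In containers.conf that is a table mapping plugin names to socket paths, roughly like this (name and path are illustrative):

[engine.volume_plugins]
testplugin = "/run/docker/plugins/testplugin.sock"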
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index f22e48746..d2bcd8db3 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -72,6 +72,140 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
return r.setupContainer(ctx, ctr)
}
+// RenameContainer renames the given container.
+// The given container object will be rendered unusable, and a new, renamed
+// Container will be returned.
+func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+
+ if err := ctr.syncContainer(); err != nil {
+ return nil, err
+ }
+
+ if newName == "" || !define.NameRegex.MatchString(newName) {
+ return nil, define.RegexError
+ }
+
+ // Check if the name is available.
+ // This is *100% NOT ATOMIC* so any failures in-flight will do
+ // *VERY BAD THINGS* to the state. So we have to try and catch all we
+ // can before starting.
+ if _, err := r.state.LookupContainerID(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
+ }
+ if _, err := r.state.LookupPod(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
+ }
+
+ // TODO: Investigate if it is possible to remove this limitation.
+ depCtrs, err := r.state.ContainerInUse(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if len(depCtrs) > 0 {
+ return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
+ }
+
+ // We need to pull an updated config, in case another rename fired and
+ // the config was re-written.
+ newConf, err := r.state.GetContainerConfig(ctr.ID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to rename", ctr.ID())
+ }
+ ctr.config = newConf
+
+ // TODO: This is going to fail if we have active exec sessions, too.
+ // Investigate fixing that at a later date.
+
+ var pod *Pod
+ if ctr.config.Pod != "" {
+ tmpPod, err := r.state.Pod(ctr.config.Pod)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
+ }
+ pod = tmpPod
+ // Lock pod to ensure it's not removed while we're working
+ pod.lock.Lock()
+ defer pod.lock.Unlock()
+ }
+
+ // Lock all volumes to ensure they are not removed while we're working
+ volsLocked := make(map[string]bool)
+ for _, namedVol := range ctr.config.NamedVolumes {
+ if volsLocked[namedVol.Name] {
+ continue
+ }
+ vol, err := r.state.Volume(namedVol.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
+ }
+
+ volsLocked[vol.Name()] = true
+ vol.lock.Lock()
+ defer vol.lock.Unlock()
+ }
+
+ logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
+
+ // Step 1: remove the old container.
+ if pod != nil {
+ if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ } else {
+ if err := r.state.RemoveContainer(ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ }
+
+ // Step 2: Make a new container based on the old one.
+ // TODO: Should we deep-copy the container config and state, to be safe?
+ newCtr := new(Container)
+ newCtr.config = ctr.config
+ newCtr.state = ctr.state
+ newCtr.lock = ctr.lock
+ newCtr.ociRuntime = ctr.ociRuntime
+ newCtr.runtime = r
+ newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
+ newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
+ newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
+ newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
+
+ newCtr.valid = true
+ newCtr.config.Name = newName
+
+ // Step 3: Add that new container to the DB
+ if pod != nil {
+ if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ } else {
+ if err := r.state.AddContainer(newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ }
+
+ // Step 4: Save the new container, to force the state to be written to
+ // the DB. This may not be necessary, depending on DB implementation,
+ // but let's do it to be safe.
+ if err := newCtr.save(); err != nil {
+ return nil, err
+ }
+
+ // Step 5: rename the container in c/storage.
+ // This can fail if the name is already in use by a non-Podman
+ // container. This puts us in a bad spot - we've already renamed the
+ // container in Podman. We can swap the order, but then we have the
+ // opposite problem. Atomicity is a real problem here, with no easy
+ // solution.
+ if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ return nil, err
+ }
+
+ return newCtr, nil
+}
+
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
@@ -393,7 +527,7 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
// removePod is used only when removing pods. It instructs Podman to ignore
// infra container protections, and *not* remove from the database (as pod
// remove will handle that).
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool, removePod bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
span.SetTag("type", "runtime")
defer span.Finish()
@@ -406,6 +540,18 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ // We need to refresh container config from the DB, to ensure that any
+ // changes (e.g. a rename) are picked up before we start removing.
+ // Since HasContainer above succeeded, we can safely assume the
+ // container exists.
+ // This is *very iffy* but it should be OK because the container won't
+ // exist once we're done.
+ newConf, err := r.state.GetContainerConfig(c.ID())
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ }
+ c.config = newConf
+
logrus.Debugf("Removing container %s", c.ID())
// We need to lock the pod before we lock the container.
@@ -413,7 +559,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Don't need to do this in pod removal case - we're evicting the entire
// pod.
var pod *Pod
- var err error
runtime := c.runtime
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
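On the TODO in step 2 of RenameContainer about deep-copying the config: a JSON round-trip is a common (if not the fastest) Go idiom for that. A sketch with a stand-in config type follows; this is one possible follow-up, not what the patch does:

package main

import (
	"encoding/json"
	"fmt"
)

type containerConfig struct {
	Name   string            `json:"name"`
	Labels map[string]string `json:"labels"`
}

// deepCopy clones a config by marshaling and unmarshaling it, so the
// copy shares no maps or slices with the original.
func deepCopy(in *containerConfig) (*containerConfig, error) {
	buf, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := new(containerConfig)
	if err := json.Unmarshal(buf, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := &containerConfig{Name: "old", Labels: map[string]string{"a": "b"}}
	copied, _ := deepCopy(orig)
	copied.Name = "new"
	copied.Labels["a"] = "c"
	fmt.Println(orig.Name, orig.Labels["a"]) // old b — original untouched
}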
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 9bf0fd108..4a29f01aa 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -11,7 +11,9 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
+ volplugin "github.com/containers/podman/v2/libpod/plugin"
"github.com/containers/storage/pkg/stringid"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -53,6 +55,14 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
}
+ // Plugin can be nil if driver is local, but that's OK - superfluous
+ // assignment doesn't hurt much.
+ plugin, err := r.getVolumePlugin(volume.config.Driver)
+ if err != nil {
+ return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
+ }
+ volume.plugin = plugin
+
if volume.config.Driver == define.VolumeDriverLocal {
logrus.Debugf("Validating options for local driver")
// Validate options
@@ -66,25 +76,38 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
}
- // Create the mountpoint of this volume
- volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
- if err := os.MkdirAll(volPathRoot, 0700); err != nil {
- return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot)
- }
- if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
- }
- fullVolPath := filepath.Join(volPathRoot, "_data")
- if err := os.MkdirAll(fullVolPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
- }
- if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
- }
- if err := LabelVolumePath(fullVolPath); err != nil {
- return nil, err
+ // Now we get conditional: we either need to make the volume in the
+ // volume plugin, or on disk if not using a plugin.
+ if volume.plugin != nil {
+ // We can't chown, or relabel, or similar the path the volume is
+ // using, because it's not managed by us.
+ // TODO: reevaluate this once we actually have volume plugins in
+ // use in production - it may be safe, but I can't tell without
+ // knowing what the actual plugin does...
+ if err := makeVolumeInPluginIfNotExist(volume.config.Name, volume.config.Options, volume.plugin); err != nil {
+ return nil, err
+ }
+ } else {
+ // Create the mountpoint of this volume
+ volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
+ if err := os.MkdirAll(volPathRoot, 0700); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot)
+ }
+ if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
+ return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
+ }
+ fullVolPath := filepath.Join(volPathRoot, "_data")
+ if err := os.MkdirAll(fullVolPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
+ }
+ if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
+ return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
+ }
+ if err := LabelVolumePath(fullVolPath); err != nil {
+ return nil, err
+ }
+ volume.config.MountPoint = fullVolPath
}
- volume.config.MountPoint = fullVolPath
lock, err := r.lockManager.AllocateLock()
if err != nil {
@@ -111,6 +134,39 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
return volume, nil
}
+// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it
+// does not already exist.
+func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error {
+ // Ping the volume plugin to see if it exists first.
+ // If it does, use the existing volume in the plugin.
+ // Options may not match exactly, but not much we can do about
+ // that. Not complaining avoids a lot of the sync issues we see
+ // with c/storage and libpod DB.
+ needsCreate := true
+ getReq := new(pluginapi.GetRequest)
+ getReq.Name = name
+ if resp, err := plugin.GetVolume(getReq); err == nil {
+ // TODO: What do we do if we get a 200 response, but the
+ // Volume is nil? The docs on the Plugin API are very
+ // nonspecific, so I don't know if this is valid or
+ // not...
+ if resp != nil {
+ needsCreate = false
+ logrus.Infof("Volume %q already exists in plugin %q, using existing volume", name, plugin.Name)
+ }
+ }
+ if needsCreate {
+ createReq := new(pluginapi.CreateRequest)
+ createReq.Name = name
+ createReq.Options = options
+ if err := plugin.CreateVolume(createReq); err != nil {
+ return errors.Wrapf(err, "error creating volume %q in plugin %s", name, plugin.Name)
+ }
+ }
+
+ return nil
+}
+
// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
@@ -185,9 +241,43 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
var removalErr error
+ // If we use a volume plugin, we need to remove from the plugin.
+ if v.UsesVolumeDriver() {
+ canRemove := true
+
+ // Do we have a volume driver?
+ if v.plugin == nil {
+ canRemove = false
+ removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ } else {
+ // Ping the plugin first to verify the volume still
+ // exists.
+ // We're trying to be very tolerant of missing volumes
+ // in the backend, to avoid the problems we see with
+ // sync between c/storage and the Libpod DB.
+ getReq := new(pluginapi.GetRequest)
+ getReq.Name = v.Name()
+ if _, err := v.plugin.GetVolume(getReq); err != nil {
+ canRemove = false
+ removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ }
+ }
+ if canRemove {
+ req := new(pluginapi.RemoveRequest)
+ req.Name = v.Name()
+ if err := v.plugin.RemoveVolume(req); err != nil {
+ removalErr = errors.Wrapf(err, "volume %s could not be removed from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ }
+ }
+ }
+
// Free the volume's lock
if err := v.lock.Free(); err != nil {
- removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ } else {
+ logrus.Errorf("Error freeing lock for volume %q: %v", v.Name(), err)
+ }
}
// Delete the mountpoint path of the volume, that is delete the volume
@@ -196,7 +286,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if removalErr == nil {
removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
} else {
- logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ logrus.Errorf("Error cleaning up volume storage for volume %q: %v", v.Name(), err)
}
}
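End to end, the plugin create/remove paths above are driven by the ordinary volume commands. Assuming a plugin named testplugin is configured in containers.conf as sketched earlier, the flow would be roughly:

    podman volume create --driver testplugin --opt foo=bar testvol
    podman volume inspect testvol
    podman volume rm testvol

Options given via --opt are forwarded as-is to the plugin's Create call, per makeVolumeInPluginIfNotExist.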
diff --git a/libpod/volume.go b/libpod/volume.go
index ed08d375f..4c137cb8e 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/lock"
+ "github.com/containers/podman/v2/libpod/plugin"
)
// Volume is a libpod named volume.
@@ -18,6 +19,7 @@ type Volume struct {
state *VolumeState
valid bool
+ plugin *plugin.VolumePlugin
runtime *Runtime
lock lock.Locker
}
@@ -31,7 +33,7 @@ type VolumeConfig struct {
// Labels for the volume.
Labels map[string]string `json:"labels"`
// The volume driver. Empty string or local does not activate a volume
- // driver, all other volumes will.
+ // driver; all other values will.
Driver string `json:"volumeDriver"`
// The location the volume is mounted at.
MountPoint string `json:"mountPoint"`
@@ -53,6 +55,10 @@ type VolumeConfig struct {
// Volumes are not guaranteed to have a state. Only volumes using the Local
// driver that have mount options set will create a state.
type VolumeState struct {
+ // Mountpoint is the location where the volume was mounted.
+ // This is only used for volumes using a volume plugin, which will mount
+ // at non-standard locations.
+ MountPoint string `json:"mountPoint,omitempty"`
// MountCount is the number of times this volume has been requested to
// be mounted.
// It is incremented on mount() and decremented on unmount().
@@ -115,8 +121,20 @@ func (v *Volume) Labels() map[string]string {
}
// MountPoint returns the volume's mountpoint on the host
-func (v *Volume) MountPoint() string {
- return v.config.MountPoint
+func (v *Volume) MountPoint() (string, error) {
+ // For the sake of performance, avoid locking unless we have to.
+ if v.UsesVolumeDriver() {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ if err := v.update(); err != nil {
+ return "", err
+ }
+
+ return v.state.MountPoint, nil
+ }
+
+ return v.config.MountPoint, nil
}
// Options return the volume's options
@@ -139,14 +157,19 @@ func (v *Volume) UID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
- if !v.valid {
- return -1, define.ErrVolumeRemoved
+ if err := v.update(); err != nil {
+ return -1, err
}
+ return v.uid(), nil
+}
+
+// Internal, unlocked accessor for UID.
+func (v *Volume) uid() int {
if v.state.UIDChowned > 0 {
- return v.state.UIDChowned, nil
+ return v.state.UIDChowned
}
- return v.config.UID, nil
+ return v.config.UID
}
// GID returns the GID the volume will be created as.
@@ -154,14 +177,19 @@ func (v *Volume) GID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
- if !v.valid {
- return -1, define.ErrVolumeRemoved
+ if err := v.update(); err != nil {
+ return -1, err
}
+ return v.gid(), nil
+}
+
+// Internal, unlocked accessor for GID.
+func (v *Volume) gid() int {
if v.state.GIDChowned > 0 {
- return v.state.GIDChowned, nil
+ return v.state.GIDChowned
}
- return v.config.GID, nil
+ return v.config.GID
}
// CreatedTime returns the time the volume was created at. It was not tracked
@@ -198,3 +226,10 @@ func (v *Volume) IsDangling() (bool, error) {
}
return len(ctrs) == 0, nil
}
+
+// UsesVolumeDriver determines whether the volume uses a volume driver. Volume
+// drivers are pluggable backends for volumes that will manage the storage and
+// mounting.
+func (v *Volume) UsesVolumeDriver() bool {
+ return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "")
+}
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index c8b20b8f1..2448d1bb5 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -1,60 +1,52 @@
package libpod
import (
- "time"
-
"github.com/containers/podman/v2/libpod/define"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-// InspectVolumeData is the output of Inspect() on a volume. It is matched to
-// the format of 'docker volume inspect'.
-type InspectVolumeData struct {
- // Name is the name of the volume.
- Name string `json:"Name"`
- // Driver is the driver used to create the volume.
- // This will be properly implemented in a future version.
- Driver string `json:"Driver"`
- // Mountpoint is the path on the host where the volume is mounted.
- Mountpoint string `json:"Mountpoint"`
- // CreatedAt is the date and time the volume was created at. This is not
- // stored for older Libpod volumes; if so, it will be omitted.
- CreatedAt time.Time `json:"CreatedAt,omitempty"`
- // Status is presently unused and provided only for Docker compatibility.
- // In the future it will be used to return information on the volume's
- // current state.
- Status map[string]string `json:"Status,omitempty"`
- // Labels includes the volume's configured labels, key:value pairs that
- // can be passed during volume creation to provide information for third
- // party tools.
- Labels map[string]string `json:"Labels"`
- // Scope is unused and provided solely for Docker compatibility. It is
- // unconditionally set to "local".
- Scope string `json:"Scope"`
- // Options is a set of options that were used when creating the volume.
- // It is presently not used.
- Options map[string]string `json:"Options"`
- // UID is the UID that the volume was created with.
- UID int `json:"UID,omitempty"`
- // GID is the GID that the volume was created with.
- GID int `json:"GID,omitempty"`
- // Anonymous indicates that the volume was created as an anonymous
- // volume for a specific container, and will be be removed when any
- // container using it is removed.
- Anonymous bool `json:"Anonymous,omitempty"`
-}
-
// Inspect provides detailed information about the configuration of the given
// volume.
-func (v *Volume) Inspect() (*InspectVolumeData, error) {
+func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
if !v.valid {
return nil, define.ErrVolumeRemoved
}
- data := new(InspectVolumeData)
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ if err := v.update(); err != nil {
+ return nil, err
+ }
+
+ data := new(define.InspectVolumeData)
+
+ data.Mountpoint = v.config.MountPoint
+ if v.UsesVolumeDriver() {
+ logrus.Debugf("Querying volume plugin %s for status", v.config.Driver)
+ data.Mountpoint = v.state.MountPoint
+
+ if v.plugin == nil {
+ return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver)
+ }
+
+ // Retrieve status for the volume.
+ // Need to query the volume driver.
+ req := new(pluginapi.GetRequest)
+ req.Name = v.Name()
+ resp, err := v.plugin.GetVolume(req)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver())
+ }
+ if resp != nil {
+ data.Status = resp.Status
+ }
+ }
data.Name = v.config.Name
data.Driver = v.config.Driver
- data.Mountpoint = v.config.MountPoint
data.CreatedAt = v.config.CreatedTime
data.Labels = make(map[string]string)
for k, v := range v.config.Labels {
@@ -65,15 +57,8 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
for k, v := range v.config.Options {
data.Options[k] = v
}
- var err error
- data.UID, err = v.UID()
- if err != nil {
- return nil, err
- }
- data.GID, err = v.GID()
- if err != nil {
- return nil, err
- }
+ data.UID = v.uid()
+ data.GID = v.gid()
data.Anonymous = v.config.IsAnon
return data, nil
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 95cb752e0..88d940370 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -22,13 +22,24 @@ func newVolume(runtime *Runtime) *Volume {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
+ if v.UsesVolumeDriver() {
+ return nil
+ }
+
+ // TODO: Should this be converted to use v.config.MountPoint?
return os.RemoveAll(filepath.Join(v.runtime.config.Engine.VolumePath, v.Name()))
}
// Volumes with options set, or a filesystem type, or a device to mount need to
// be mounted and unmounted.
func (v *Volume) needsMount() bool {
- return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal
+ // Non-local driver always needs mount
+ if v.UsesVolumeDriver() {
+ return true
+ }
+
+ // Local driver with options needs mount
+ return len(v.config.Options) > 0
}
// update() updates the volume state from the DB.
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
index bbf47f124..e184505e7 100644
--- a/libpod/volume_internal_linux.go
+++ b/libpod/volume_internal_linux.go
@@ -8,11 +8,17 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/rootless"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
+// This is a pseudo-container ID to use when requesting a mount or unmount from
+// the volume plugins.
+// This is the SHA-256 of the string "placeholder\n".
+const pseudoCtrID = "2f73349cfc4630255319c6c8dfc1b46a8996ace9d14d8e07563b165915918ec2"
+
// mount mounts the volume if necessary.
// A mount is necessary if a volume has any options set.
// If a mount is necessary, v.state.MountCount will be incremented.
@@ -20,7 +26,7 @@ import (
// host. Otherwise, we assume it is already mounted.
// Must be done while the volume is locked.
// Is a no-op on volumes that do not require a mount (as defined by
-// volumeNeedsMount())
+// volumeNeedsMount()).
func (v *Volume) mount() error {
if !v.needsMount() {
return nil
@@ -44,6 +50,28 @@ func (v *Volume) mount() error {
return v.save()
}
+ // Volume plugins implement their own mount counter, based on the ID of
+ // the mounting container. But we already have one, and honestly I trust
+ // ours more. So hardcode container ID to something reasonable, and use
+ // the same one for everything.
+ if v.UsesVolumeDriver() {
+ if v.plugin == nil {
+ return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ }
+
+ req := new(pluginapi.MountRequest)
+ req.Name = v.Name()
+ req.ID = pseudoCtrID
+ mountPoint, err := v.plugin.MountVolume(req)
+ if err != nil {
+ return err
+ }
+
+ v.state.MountCount++
+ v.state.MountPoint = mountPoint
+ return v.save()
+ }
+
volDevice := v.config.Options["device"]
volType := v.config.Options["type"]
volOptions := v.config.Options["o"]
@@ -132,6 +160,22 @@ func (v *Volume) unmount(force bool) error {
logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount)
if v.state.MountCount == 0 {
+ if v.UsesVolumeDriver() {
+ if v.plugin == nil {
+ return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ }
+
+ req := new(pluginapi.UnmountRequest)
+ req.Name = v.Name()
+ req.ID = pseudoCtrID
+ if err := v.plugin.UnmountVolume(req); err != nil {
+ return err
+ }
+
+ v.state.MountPoint = ""
+ return v.save()
+ }
+
// Unmount the volume
if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil {
if err == unix.EINVAL {
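As an aside, the pseudoCtrID constant introduced in this file is derived, not random; assuming the comment is accurate about the input string, it can be reproduced with a few lines of Go:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Hash the same string the comment names; the output should match
	// the pseudoCtrID constant above.
	sum := sha256.Sum256([]byte("placeholder\n"))
	fmt.Println(hex.EncodeToString(sum[:]))
}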