author    | Matthew Heon <matthew.heon@pm.me> | 2020-12-04 16:24:56 -0500
committer | Matthew Heon <mheon@redhat.com>   | 2021-01-14 15:35:33 -0500
commit    | b53cb57680a6fd7b383636ac2d6cd71003532915 (patch)
tree      | 3979d1f9763326cd4db3a80742cec3a031ca99b2
parent    | 2b7793b6121d336a285fb7b9a7612c221cbf63d2 (diff)
Initial implementation of volume plugins
This implements support for mounting and unmounting volumes
backed by volume plugins. Support for actually retrieving
plugins requires a pull request to land in containers.conf and
for that change to be vendored, so it is not yet ready; as a
result, this code is only compile-tested. However, the code for
everything past retrieving the plugin has been written - there is
support for creating, removing, mounting, and unmounting volumes,
which should allow full functionality once the c/common PR is
merged.
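For reference, here is a minimal sketch of how the new libpod/plugin API added in this commit is driven, based on the calls visible in the diff (GetVolumePlugin, CreateVolume, MountVolume, UnmountVolume, RemoveVolume). The driver name, socket path, volume name, options, and container ID are illustrative placeholders, and real callers go through the Runtime/Volume wrappers rather than talking to the plugin directly:

```go
package main

import (
	"fmt"

	volplugin "github.com/containers/podman/v2/libpod/plugin"
	pluginapi "github.com/docker/go-plugins-helpers/volume"
)

func main() {
	// Resolve the plugin by name and socket path. In Podman the path comes
	// from the volume_plugins table in containers.conf; this one is made up.
	plug, err := volplugin.GetVolumePlugin("mydriver", "/run/docker/plugins/mydriver.sock")
	if err != nil {
		panic(err)
	}

	// Create the volume in the plugin. Options are passed through as-is.
	create := new(pluginapi.CreateRequest)
	create.Name = "myvol"
	create.Options = map[string]string{"o": "size=10G"} // example option only
	if err := plug.CreateVolume(create); err != nil {
		panic(err)
	}

	// Mount it; the plugin answers with the host path it mounted the volume at.
	mount := new(pluginapi.MountRequest)
	mount.Name = "myvol"
	mount.ID = "example-container-id"
	mountPoint, err := plug.MountVolume(mount)
	if err != nil {
		panic(err)
	}
	fmt.Println("volume mounted at", mountPoint)

	// Unmount and remove once we are done with it.
	unmount := new(pluginapi.UnmountRequest)
	unmount.Name = "myvol"
	unmount.ID = "example-container-id"
	if err := plug.UnmountVolume(unmount); err != nil {
		panic(err)
	}
	remove := new(pluginapi.RemoveRequest)
	remove.Name = "myvol"
	if err := plug.RemoveVolume(remove); err != nil {
		panic(err)
	}
}
```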
A major change is the signature of the MountPoint function for
volumes, which now, by necessity, returns an error. Named volumes
managed by a plugin do not have a mountpoint we control; the
mountpoint is managed entirely by the plugin. As such, we need to
cache the path in the DB, and calls to retrieve it now need to
access the DB (and may therefore fail).
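The effect on callers is visible throughout the diff: every former vol.MountPoint() call now has to handle an error, and plugin-backed volumes may legitimately report an empty path when not mounted. A sketch of the resulting call-site pattern (the helper name is invented for illustration):

```go
package example

import (
	"github.com/containers/podman/v2/libpod"
	"github.com/pkg/errors"
)

// hostPathOf returns the host path backing a named volume under the new
// MountPoint() contract.
func hostPathOf(vol *libpod.Volume) (string, error) {
	// MountPoint() may now read the cached path from the DB-backed volume
	// state, so it can fail.
	mountPoint, err := vol.MountPoint()
	if err != nil {
		return "", err
	}
	if mountPoint == "" {
		// Plugin-managed volumes only have a mountpoint while mounted.
		return "", errors.Errorf("volume %s is not mounted", vol.Name())
	}
	return mountPoint, nil
}
```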
Notably absent is support for SELinux relabelling and chowning
these volumes. Given that we don't manage the mountpoint for
these volumes, I am extremely reluctant to try to modify it - we
could easily break the plugin by chowning or relabelling it.
Also, we had no fewer than *5* separate implementations of
inspecting a volume floating around in pkg/domain/infra/abi and
pkg/api/handlers/libpod, and none of them used volume.Inspect(),
the only correct way of inspecting volumes. Remove them all and
consolidate on the correct approach. The compat API is likely
still doing things the wrong way, but that is an issue for
another day.
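The consolidated pattern is short enough to show; the snippet below mirrors the updated handlers in pkg/api/handlers/libpod/volumes.go, with the surrounding handler boilerplate omitted, the function name invented for illustration, and import paths assumed from the tree layout:

```go
package example

import (
	"net/http"

	"github.com/containers/podman/v2/libpod"
	"github.com/containers/podman/v2/pkg/api/handlers/utils"
	"github.com/containers/podman/v2/pkg/domain/entities"
)

// writeVolumeInspect builds the API response from a single Inspect() call,
// instead of copying Name/Driver/Mountpoint/UID/GID fields by hand.
func writeVolumeInspect(w http.ResponseWriter, vol *libpod.Volume) {
	inspectOut, err := vol.Inspect()
	if err != nil {
		utils.InternalServerError(w, err)
		return
	}
	volResponse := entities.VolumeConfigResponse{
		InspectVolumeData: *inspectOut,
	}
	utils.WriteResponse(w, http.StatusOK, volResponse)
}
```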
Fixes #4304
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
25 files changed, 500 insertions, 248 deletions
diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md index 118f024df..a06411000 100644 --- a/docs/source/markdown/podman-volume-create.1.md +++ b/docs/source/markdown/podman-volume-create.1.md @@ -17,7 +17,7 @@ driver options can be set using the **--opt** flag. #### **--driver**=*driver* -Specify the volume driver name (default local). +Specify the volume driver name (default **local**). Setting this to a value other than **local** Podman will attempt to create the volume using a volume plugin with the given name. Such plugins must be defined in the **volume_plugins** section of the **containers.conf**(5) configuration file. #### **--help** @@ -30,13 +30,14 @@ Set metadata for a volume (e.g., --label mykey=value). #### **--opt**=*option*, **-o** Set driver specific options. -For the default driver, `local`, this allows a volume to be configured to mount a filesystem on the host. +For the default driver, **local**, this allows a volume to be configured to mount a filesystem on the host. For the `local` driver the following options are supported: `type`, `device`, and `o`. The `type` option sets the type of the filesystem to be mounted, and is equivalent to the `-t` flag to **mount(8)**. The `device` option sets the device to be mounted, and is equivalent to the `device` argument to **mount(8)**. The `o` option sets options for the mount, and is equivalent to the `-o` flag to **mount(8)** with two exceptions. The `o` option supports `uid` and `gid` options to set the UID and GID of the created volume that are not normally supported by **mount(8)**. -Using volume options with the `local` driver requires root privileges. +Using volume options with the **local** driver requires root privileges. +When not using the **local** driver, the given options will be passed directly to the volume plugin. In this case, supported options will be dictated by the plugin in question, not Podman. ## EXAMPLES @@ -53,7 +54,8 @@ $ podman volume create --label foo=bar myvol ``` ## SEE ALSO -podman-volume(1), mount(8) +**podman-volume**(1), **mount**(8), **containers.conf**(5) ## HISTORY +January 2020, updated with information on volume plugins by Matthew Heon <mheon@redhat.com> November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com> diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index dcb2ff751..b2ee63b08 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -304,6 +304,7 @@ func (s *BoltState) Refresh() error { // Reset mount count to 0 oldState.MountCount = 0 + oldState.MountPoint = "" newState, err := json.Marshal(oldState) if err != nil { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index c06fedd3e..6014fbef3 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -497,6 +497,21 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu } } + // Retrieve volume driver + if volume.UsesVolumeDriver() { + plugin, err := s.runtime.getVolumePlugin(volume.config.Driver) + if err != nil { + // We want to fail gracefully here, to ensure that we + // can still remove volumes even if their plugin is + // missing. Otherwise, we end up with volumes that + // cannot even be retrieved from the database and will + // cause things like `volume ls` to fail. 
+ logrus.Errorf("Volume %s uses volume plugin %s, but it cannot be accessed - some functionality may not be available: %v", volume.Name(), volume.config.Driver, err) + } else { + volume.plugin = plugin + } + } + // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID) if err != nil { diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 870d92ca9..ac7eae56b 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -212,7 +212,12 @@ func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, image return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) } mountStruct.Driver = volFromDB.Driver() - mountStruct.Source = volFromDB.MountPoint() + + mountPoint, err := volFromDB.MountPoint() + if err != nil { + return nil, err + } + mountStruct.Source = mountPoint parseMountOptionsForInspect(volume.Options, &mountStruct) diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 6d4d3ade0..b9ea50783 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1580,8 +1580,18 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) return nil, err } + // HACK HACK HACK - copy up into a volume driver is 100% broken + // right now. + if vol.UsesVolumeDriver() { + logrus.Infof("Not copying up into volume %s as it uses a volume driver", vol.Name()) + return vol, nil + } + // If the volume is not empty, we should not copy up. - volMount := vol.MountPoint() + volMount, err := vol.MountPoint() + if err != nil { + return nil, err + } contents, err := ioutil.ReadDir(volMount) if err != nil { return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID()) @@ -1619,7 +1629,11 @@ func (c *Container) chownVolume(volumeName string) error { return err } - if vol.state.NeedsChown { + // TODO: For now, I've disabled chowning volumes owned by non-Podman + // drivers. This may be safe, but it's really going to be a case-by-case + // thing, I think - safest to leave disabled now and reenable later if + // there is a demand. 
+ if vol.state.NeedsChown && !vol.UsesVolumeDriver() { vol.state.NeedsChown = false uid := int(c.config.Spec.Process.User.UID) @@ -1646,7 +1660,10 @@ func (c *Container) chownVolume(volumeName string) error { return err } - mountPoint := vol.MountPoint() + mountPoint, err := vol.MountPoint() + if err != nil { + return err + } if err := os.Lchown(mountPoint, uid, gid); err != nil { return err diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index ce2b52234..0553cc59c 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -341,7 +341,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if err != nil { return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID()) } - mountPoint := volume.MountPoint() + mountPoint, err := volume.MountPoint() + if err != nil { + return nil, err + } volMount := spec.Mount{ Type: "bind", Source: mountPoint, @@ -903,7 +906,15 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { return err } - input, err := archive.TarWithOptions(volume.MountPoint(), &archive.TarOptions{ + mp, err := volume.MountPoint() + if err != nil { + return err + } + if mp == "" { + return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name()) + } + + input, err := archive.TarWithOptions(mp, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeSourceDir: true, }) @@ -958,10 +969,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { func (c *Container) checkpointRestoreSupported() error { if !criu.CheckForCriu() { - return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion) + return errors.Errorf("checkpoint/restore requires at least CRIU %d", criu.MinCriuVersion) } if !c.ociRuntime.SupportsCheckpoint() { - return errors.Errorf("Configured runtime does not support checkpoint/restore") + return errors.Errorf("configured runtime does not support checkpoint/restore") } return nil } @@ -993,7 +1004,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } if c.AutoRemove() && options.TargetFile == "" { - return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used") + return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") } if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { @@ -1079,13 +1090,13 @@ func (c *Container) importCheckpoint(input string) error { } err = archive.Untar(archiveFile, c.bundlePath(), options) if err != nil { - return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input) + return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input) } // Make sure the newly created config.json exists on disk g := generate.Generator{Config: c.config.Spec} if err = c.saveSpec(g.Config); err != nil { - return errors.Wrap(err, "Saving imported container specification for restore failed") + return errors.Wrap(err, "saving imported container specification for restore failed") } return nil @@ -1130,7 +1141,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Let's try to stat() CRIU's inventory file. If it does not exist, it makes // no sense to try a restore. This is a minimal check if a checkpoint exist. 
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) { - return errors.Wrapf(err, "A complete checkpoint for this container cannot be found, cannot restore") + return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") } if err := c.checkpointRestoreLabelLog("restore.log"); err != nil { @@ -1286,16 +1297,22 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti volumeFile, err := os.Open(volumeFilePath) if err != nil { - return errors.Wrapf(err, "Failed to open volume file %s", volumeFilePath) + return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath) } defer volumeFile.Close() volume, err := c.runtime.GetVolume(v.Name) if err != nil { - return errors.Wrapf(err, "Failed to retrieve volume %s", v.Name) + return errors.Wrapf(err, "failed to retrieve volume %s", v.Name) } - mountPoint := volume.MountPoint() + mountPoint, err := volume.MountPoint() + if err != nil { + return err + } + if mountPoint == "" { + return errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name()) + } if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil { return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint) } diff --git a/libpod/define/errors.go b/libpod/define/errors.go index 568f8e88d..d37bc397e 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -35,6 +35,10 @@ var ( // aliases. ErrNoAliases = errors.New("no aliases for container") + // ErrMissingPlugin indicates that the requested operation requires a + // plugin that is not present on the system or in the configuration. + ErrMissingPlugin = errors.New("required plugin missing") + // ErrCtrExists indicates a container with the same name or ID already // exists ErrCtrExists = errors.New("container already exists") diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go new file mode 100644 index 000000000..20602ea16 --- /dev/null +++ b/libpod/define/volume_inspect.go @@ -0,0 +1,51 @@ +package define + +import ( + "time" +) + +// InspectVolumeData is the output of Inspect() on a volume. It is matched to +// the format of 'docker volume inspect'. +type InspectVolumeData struct { + // Name is the name of the volume. + Name string `json:"Name"` + // Driver is the driver used to create the volume. + // If set to "local" or "", the Local driver (Podman built-in code) is + // used to service the volume; otherwise, a volume plugin with the given + // name is used to mount and manage the volume. + Driver string `json:"Driver"` + // Mountpoint is the path on the host where the volume is mounted. + Mountpoint string `json:"Mountpoint"` + // CreatedAt is the date and time the volume was created at. This is not + // stored for older Libpod volumes; if so, it will be omitted. + CreatedAt time.Time `json:"CreatedAt,omitempty"` + // Status is used to return information on the volume's current state, + // if the volume was created using a volume plugin (uses a Driver that + // is not the local driver). + // Status is provided to us by an external program, so no guarantees are + // made about its format or contents. Further, it is an optional field, + // so it may not be set even in cases where a volume plugin is in use. 
+ Status map[string]interface{} `json:"Status,omitempty"` + // Labels includes the volume's configured labels, key:value pairs that + // can be passed during volume creation to provide information for third + // party tools. + Labels map[string]string `json:"Labels"` + // Scope is unused and provided solely for Docker compatibility. It is + // unconditionally set to "local". + Scope string `json:"Scope"` + // Options is a set of options that were used when creating the volume. + // For the Local driver, these are mount options that will be used to + // determine how a local filesystem is mounted; they are handled as + // parameters to Mount in a manner described in the volume create + // manpage. + // For non-local drivers, these are passed as-is to the volume plugin. + Options map[string]string `json:"Options"` + // UID is the UID that the volume was created with. + UID int `json:"UID,omitempty"` + // GID is the GID that the volume was created with. + GID int `json:"GID,omitempty"` + // Anonymous indicates that the volume was created as an anonymous + // volume for a specific container, and will be be removed when any + // container using it is removed. + Anonymous bool `json:"Anonymous,omitempty"` +} diff --git a/libpod/options.go b/libpod/options.go index 31c0b9ac9..c7bac7e1f 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -1549,17 +1549,6 @@ func WithVolumeDriver(driver string) VolumeCreateOption { return define.ErrVolumeFinalized } - // Uncomment when volume plugins are ready for use. - // if driver != define.VolumeDriverLocal { - // if _, err := plugin.GetVolumePlugin(driver); err != nil { - // return err - // } - // } - - if driver != define.VolumeDriverLocal { - return define.ErrNotImplemented - } - volume.config.Driver = driver return nil } diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go index 2500a4f36..c5dec651c 100644 --- a/libpod/plugin/volume_api.go +++ b/libpod/plugin/volume_api.go @@ -2,8 +2,9 @@ package plugin import ( "bytes" - "fmt" + "context" "io/ioutil" + "net" "net/http" "os" "path/filepath" @@ -43,7 +44,6 @@ var ( const ( defaultTimeout = 5 * time.Second - defaultPath = "/run/docker/plugins" volumePluginType = "VolumeDriver" ) @@ -64,6 +64,8 @@ type VolumePlugin struct { Name string // SocketPath is the unix socket at which the plugin is accessed. SocketPath string + // Client is the HTTP client we use to connect to the plugin. + Client *http.Client } // This is the response from the activate endpoint of the API. @@ -76,7 +78,7 @@ type activateResponse struct { func validatePlugin(newPlugin *VolumePlugin) error { // It's a socket. Is it a plugin? 
// Hit the Activate endpoint to find out if it is, and if so what kind - req, err := http.NewRequest("POST", activatePath, nil) + req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil) if err != nil { return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name) } @@ -84,9 +86,7 @@ func validatePlugin(newPlugin *VolumePlugin) error { req.Header.Set("Host", newPlugin.getURI()) req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1) - client := new(http.Client) - client.Timeout = defaultTimeout - resp, err := client.Do(req) + resp, err := newPlugin.Client.Do(req) if err != nil { return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name) } @@ -121,22 +121,28 @@ func validatePlugin(newPlugin *VolumePlugin) error { return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", ")) } + if plugins == nil { + plugins = make(map[string]*VolumePlugin) + } + plugins[newPlugin.Name] = newPlugin return nil } -// GetVolumePlugin gets a single volume plugin by path. -// TODO: We should not be auto-completing based on a default path; we should -// require volumes to have been pre-specified in containers.conf (will need a -// function to pre-populate the plugins list, and we should probably do a lazy -// initialization there to not slow things down too much). -func GetVolumePlugin(name string) (*VolumePlugin, error) { +// GetVolumePlugin gets a single volume plugin, with the given name, at the +// given path. +func GetVolumePlugin(name string, path string) (*VolumePlugin, error) { pluginsLock.Lock() defer pluginsLock.Unlock() plugin, exists := plugins[name] if exists { + // This shouldn't be possible, but just in case... + if plugin.SocketPath != filepath.Clean(path) { + return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath) + } + return plugin, nil } @@ -144,7 +150,20 @@ func GetVolumePlugin(name string) (*VolumePlugin, error) { newPlugin := new(VolumePlugin) newPlugin.Name = name - newPlugin.SocketPath = filepath.Join(defaultPath, fmt.Sprintf("%s.sock", name)) + newPlugin.SocketPath = filepath.Clean(path) + + // Need an HTTP client to force a Unix connection. + // And since we can reuse it, might as well cache it. + client := new(http.Client) + client.Timeout = defaultTimeout + // This bit borrowed from pkg/bindings/connection.go + client.Transport = &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", newPlugin.SocketPath) + }, + DisableCompression: true, + } + newPlugin.Client = client stat, err := os.Stat(newPlugin.SocketPath) if err != nil { @@ -183,6 +202,7 @@ func (p *VolumePlugin) verifyReachable() error { } // Send a request to the volume plugin for handling. +// Callers *MUST* close the response when they are done. 
func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) { var ( reqJSON []byte @@ -196,7 +216,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st } } - req, err := http.NewRequest("POST", endpoint, bytes.NewReader(reqJSON)) + req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON)) if err != nil { return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint) } @@ -204,13 +224,12 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st req.Header.Set("Host", p.getURI()) req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1) - client := new(http.Client) - client.Timeout = defaultTimeout - resp, err := client.Do(req) + resp, err := p.Client.Do(req) if err != nil { return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint) } - defer resp.Body.Close() + // We are *deliberately not closing* response here. It is the + // responsibility of the caller to do so after reading the response. return resp, nil } diff --git a/libpod/runtime.go b/libpod/runtime.go index 1004e4fa7..34c737a67 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -17,6 +17,7 @@ import ( "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/libpod/lock" + "github.com/containers/podman/v2/libpod/plugin" "github.com/containers/podman/v2/libpod/shutdown" "github.com/containers/podman/v2/pkg/cgroups" "github.com/containers/podman/v2/pkg/registries" @@ -888,3 +889,18 @@ func (r *Runtime) reloadStorageConf() error { logrus.Infof("applied new storage configuration: %v", r.storageConfig) return nil } + +// getVolumePlugin gets a specific volume plugin given its name. +func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) { + // There is no plugin for local. + if name == define.VolumeDriverLocal || name == "" { + return nil, nil + } + + pluginPath, ok := r.config.Engine.VolumePlugins[name] + if !ok { + return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name) + } + + return plugin.GetVolumePlugin(name, pluginPath) +} diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index 9bf0fd108..4a29f01aa 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -11,7 +11,9 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" + volplugin "github.com/containers/podman/v2/libpod/plugin" "github.com/containers/storage/pkg/stringid" + pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -53,6 +55,14 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name) } + // Plugin can be nil if driver is local, but that's OK - superfluous + // assignment doesn't hurt much. 
+ plugin, err := r.getVolumePlugin(volume.config.Driver) + if err != nil { + return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver) + } + volume.plugin = plugin + if volume.config.Driver == define.VolumeDriverLocal { logrus.Debugf("Validating options for local driver") // Validate options @@ -66,25 +76,38 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } } - // Create the mountpoint of this volume - volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) - if err := os.MkdirAll(volPathRoot, 0700); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) - } - if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) - } - fullVolPath := filepath.Join(volPathRoot, "_data") - if err := os.MkdirAll(fullVolPath, 0755); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath) - } - if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) - } - if err := LabelVolumePath(fullVolPath); err != nil { - return nil, err + // Now we get conditional: we either need to make the volume in the + // volume plugin, or on disk if not using a plugin. + if volume.plugin != nil { + // We can't chown, or relabel, or similar the path the volume is + // using, because it's not managed by us. + // TODO: reevaluate this once we actually have volume plugins in + // use in production - it may be safe, but I can't tell without + // knowing what the actual plugin does... + if err := makeVolumeInPluginIfNotExist(volume.config.Name, volume.config.Options, volume.plugin); err != nil { + return nil, err + } + } else { + // Create the mountpoint of this volume + volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) + if err := os.MkdirAll(volPathRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) + } + if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil { + return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) + } + fullVolPath := filepath.Join(volPathRoot, "_data") + if err := os.MkdirAll(fullVolPath, 0755); err != nil { + return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath) + } + if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil { + return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) + } + if err := LabelVolumePath(fullVolPath); err != nil { + return nil, err + } + volume.config.MountPoint = fullVolPath } - volume.config.MountPoint = fullVolPath lock, err := r.lockManager.AllocateLock() if err != nil { @@ -111,6 +134,39 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) return volume, nil } +// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it +// does not already exist. 
+func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error { + // Ping the volume plugin to see if it exists first. + // If it does, use the existing volume in the plugin. + // Options may not match exactly, but not much we can do about + // that. Not complaining avoids a lot of the sync issues we see + // with c/storage and libpod DB. + needsCreate := true + getReq := new(pluginapi.GetRequest) + getReq.Name = name + if resp, err := plugin.GetVolume(getReq); err == nil { + // TODO: What do we do if we get a 200 response, but the + // Volume is nil? The docs on the Plugin API are very + // nonspecific, so I don't know if this is valid or + // not... + if resp != nil { + needsCreate = false + logrus.Infof("Volume %q already exists in plugin %q, using existing volume", name, plugin.Name) + } + } + if needsCreate { + createReq := new(pluginapi.CreateRequest) + createReq.Name = name + createReq.Options = options + if err := plugin.CreateVolume(createReq); err != nil { + return errors.Wrapf(err, "error creating volume %q in plugin %s", name, plugin.Name) + } + } + + return nil +} + // removeVolume removes the specified volume from state as well tears down its mountpoint and storage func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error { if !v.valid { @@ -185,9 +241,43 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error var removalErr error + // If we use a volume plugin, we need to remove from the plugin. + if v.UsesVolumeDriver() { + canRemove := true + + // Do we have a volume driver? + if v.plugin == nil { + canRemove = false + removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } else { + // Ping the plugin first to verify the volume still + // exists. + // We're trying to be very tolerant of missing volumes + // in the backend, to avoid the problems we see with + // sync between c/storage and the Libpod DB. 
+ getReq := new(pluginapi.GetRequest) + getReq.Name = v.Name() + if _, err := v.plugin.GetVolume(getReq); err != nil { + canRemove = false + removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } + } + if canRemove { + req := new(pluginapi.RemoveRequest) + req.Name = v.Name() + if err := v.plugin.RemoveVolume(req); err != nil { + removalErr = errors.Wrapf(err, "volume %s could not be removed from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } + } + } + // Free the volume's lock if err := v.lock.Free(); err != nil { - removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + } else { + logrus.Errorf("Error freeing lock for volume %q: %v", v.Name(), err) + } } // Delete the mountpoint path of the volume, that is delete the volume @@ -196,7 +286,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error if removalErr == nil { removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) } else { - logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err) + logrus.Errorf("Error cleaning up volume storage for volume %q: %v", v.Name(), err) } } diff --git a/libpod/volume.go b/libpod/volume.go index ed08d375f..4c137cb8e 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -7,6 +7,7 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/lock" + "github.com/containers/podman/v2/libpod/plugin" ) // Volume is a libpod named volume. @@ -18,6 +19,7 @@ type Volume struct { state *VolumeState valid bool + plugin *plugin.VolumePlugin runtime *Runtime lock lock.Locker } @@ -31,7 +33,7 @@ type VolumeConfig struct { // Labels for the volume. Labels map[string]string `json:"labels"` // The volume driver. Empty string or local does not activate a volume - // driver, all other volumes will. + // driver, all other values will. Driver string `json:"volumeDriver"` // The location the volume is mounted at. MountPoint string `json:"mountPoint"` @@ -53,6 +55,10 @@ type VolumeConfig struct { // Volumes are not guaranteed to have a state. Only volumes using the Local // driver that have mount options set will create a state. type VolumeState struct { + // Mountpoint is the location where the volume was mounted. + // This is only used for volumes using a volume plugin, which will mount + // at non-standard locations. + MountPoint string `json:"mountPoint,omitempty"` // MountCount is the number of times this volume has been requested to // be mounted. // It is incremented on mount() and decremented on unmount(). @@ -115,8 +121,20 @@ func (v *Volume) Labels() map[string]string { } // MountPoint returns the volume's mountpoint on the host -func (v *Volume) MountPoint() string { - return v.config.MountPoint +func (v *Volume) MountPoint() (string, error) { + // For the sake of performance, avoid locking unless we have to. 
+ if v.UsesVolumeDriver() { + v.lock.Lock() + defer v.lock.Unlock() + + if err := v.update(); err != nil { + return "", err + } + + return v.state.MountPoint, nil + } + + return v.config.MountPoint, nil } // Options return the volume's options @@ -139,14 +157,19 @@ func (v *Volume) UID() (int, error) { v.lock.Lock() defer v.lock.Unlock() - if !v.valid { - return -1, define.ErrVolumeRemoved + if err := v.update(); err != nil { + return -1, err } + return v.uid(), nil +} + +// Internal, unlocked accessor for UID. +func (v *Volume) uid() int { if v.state.UIDChowned > 0 { - return v.state.UIDChowned, nil + return v.state.UIDChowned } - return v.config.UID, nil + return v.config.UID } // GID returns the GID the volume will be created as. @@ -154,14 +177,19 @@ func (v *Volume) GID() (int, error) { v.lock.Lock() defer v.lock.Unlock() - if !v.valid { - return -1, define.ErrVolumeRemoved + if err := v.update(); err != nil { + return -1, err } + return v.gid(), nil +} + +// Internal, unlocked accessor for GID. +func (v *Volume) gid() int { if v.state.GIDChowned > 0 { - return v.state.GIDChowned, nil + return v.state.GIDChowned } - return v.config.GID, nil + return v.config.GID } // CreatedTime returns the time the volume was created at. It was not tracked @@ -198,3 +226,10 @@ func (v *Volume) IsDangling() (bool, error) { } return len(ctrs) == 0, nil } + +// UsesVolumeDriver determines whether the volume uses a volume driver. Volume +// drivers are pluggable backends for volumes that will manage the storage and +// mounting. +func (v *Volume) UsesVolumeDriver() bool { + return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "") +} diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go index c8b20b8f1..2448d1bb5 100644 --- a/libpod/volume_inspect.go +++ b/libpod/volume_inspect.go @@ -1,60 +1,52 @@ package libpod import ( - "time" - "github.com/containers/podman/v2/libpod/define" + pluginapi "github.com/docker/go-plugins-helpers/volume" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// InspectVolumeData is the output of Inspect() on a volume. It is matched to -// the format of 'docker volume inspect'. -type InspectVolumeData struct { - // Name is the name of the volume. - Name string `json:"Name"` - // Driver is the driver used to create the volume. - // This will be properly implemented in a future version. - Driver string `json:"Driver"` - // Mountpoint is the path on the host where the volume is mounted. - Mountpoint string `json:"Mountpoint"` - // CreatedAt is the date and time the volume was created at. This is not - // stored for older Libpod volumes; if so, it will be omitted. - CreatedAt time.Time `json:"CreatedAt,omitempty"` - // Status is presently unused and provided only for Docker compatibility. - // In the future it will be used to return information on the volume's - // current state. - Status map[string]string `json:"Status,omitempty"` - // Labels includes the volume's configured labels, key:value pairs that - // can be passed during volume creation to provide information for third - // party tools. - Labels map[string]string `json:"Labels"` - // Scope is unused and provided solely for Docker compatibility. It is - // unconditionally set to "local". - Scope string `json:"Scope"` - // Options is a set of options that were used when creating the volume. - // It is presently not used. - Options map[string]string `json:"Options"` - // UID is the UID that the volume was created with. 
- UID int `json:"UID,omitempty"` - // GID is the GID that the volume was created with. - GID int `json:"GID,omitempty"` - // Anonymous indicates that the volume was created as an anonymous - // volume for a specific container, and will be be removed when any - // container using it is removed. - Anonymous bool `json:"Anonymous,omitempty"` -} - // Inspect provides detailed information about the configuration of the given // volume. -func (v *Volume) Inspect() (*InspectVolumeData, error) { +func (v *Volume) Inspect() (*define.InspectVolumeData, error) { if !v.valid { return nil, define.ErrVolumeRemoved } - data := new(InspectVolumeData) + v.lock.Lock() + defer v.lock.Unlock() + + if err := v.update(); err != nil { + return nil, err + } + + data := new(define.InspectVolumeData) + + data.Mountpoint = v.config.MountPoint + if v.UsesVolumeDriver() { + logrus.Debugf("Querying volume plugin %s for status", v.config.Driver) + data.Mountpoint = v.state.MountPoint + + if v.plugin == nil { + return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver) + } + + // Retrieve status for the volume. + // Need to query the volume driver. + req := new(pluginapi.GetRequest) + req.Name = v.Name() + resp, err := v.plugin.GetVolume(req) + if err != nil { + return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver()) + } + if resp != nil { + data.Status = resp.Status + } + } data.Name = v.config.Name data.Driver = v.config.Driver - data.Mountpoint = v.config.MountPoint data.CreatedAt = v.config.CreatedTime data.Labels = make(map[string]string) for k, v := range v.config.Labels { @@ -65,15 +57,8 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) { for k, v := range v.config.Options { data.Options[k] = v } - var err error - data.UID, err = v.UID() - if err != nil { - return nil, err - } - data.GID, err = v.GID() - if err != nil { - return nil, err - } + data.UID = v.uid() + data.GID = v.gid() data.Anonymous = v.config.IsAnon return data, nil diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 95cb752e0..88d940370 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -22,13 +22,24 @@ func newVolume(runtime *Runtime) *Volume { // teardownStorage deletes the volume from volumePath func (v *Volume) teardownStorage() error { + if v.UsesVolumeDriver() { + return nil + } + + // TODO: Should this be converted to use v.config.MountPoint? return os.RemoveAll(filepath.Join(v.runtime.config.Engine.VolumePath, v.Name())) } // Volumes with options set, or a filesystem type, or a device to mount need to // be mounted and unmounted. func (v *Volume) needsMount() bool { - return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal + // Non-local driver always needs mount + if v.UsesVolumeDriver() { + return true + } + + // Local driver with options needs mount + return len(v.config.Options) > 0 } // update() updates the volume state from the DB. 
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go index bbf47f124..e184505e7 100644 --- a/libpod/volume_internal_linux.go +++ b/libpod/volume_internal_linux.go @@ -8,11 +8,17 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/rootless" + pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) +// This is a pseudo-container ID to use when requesting a mount or unmount from +// the volume plugins. +// This is the shas256 of the string "placeholder\n". +const pseudoCtrID = "2f73349cfc4630255319c6c8dfc1b46a8996ace9d14d8e07563b165915918ec2" + // mount mounts the volume if necessary. // A mount is necessary if a volume has any options set. // If a mount is necessary, v.state.MountCount will be incremented. @@ -20,7 +26,7 @@ import ( // host. Otherwise, we assume it is already mounted. // Must be done while the volume is locked. // Is a no-op on volumes that do not require a mount (as defined by -// volumeNeedsMount()) +// volumeNeedsMount()). func (v *Volume) mount() error { if !v.needsMount() { return nil @@ -44,6 +50,28 @@ func (v *Volume) mount() error { return v.save() } + // Volume plugins implement their own mount counter, based on the ID of + // the mounting container. But we already have one, and honestly I trust + // ours more. So hardcode container ID to something reasonable, and use + // the same one for everything. + if v.UsesVolumeDriver() { + if v.plugin == nil { + return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name()) + } + + req := new(pluginapi.MountRequest) + req.Name = v.Name() + req.ID = pseudoCtrID + mountPoint, err := v.plugin.MountVolume(req) + if err != nil { + return err + } + + v.state.MountCount += 1 + v.state.MountPoint = mountPoint + return v.save() + } + volDevice := v.config.Options["device"] volType := v.config.Options["type"] volOptions := v.config.Options["o"] @@ -132,6 +160,22 @@ func (v *Volume) unmount(force bool) error { logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) if v.state.MountCount == 0 { + if v.UsesVolumeDriver() { + if v.plugin == nil { + return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name()) + } + + req := new(pluginapi.UnmountRequest) + req.Name = v.Name() + req.ID = pseudoCtrID + if err := v.plugin.UnmountVolume(req); err != nil { + return err + } + + v.state.MountPoint = "" + return v.save() + } + // Unmount the volume if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil { if err == unix.EINVAL { diff --git a/pkg/api/handlers/compat/volumes.go b/pkg/api/handlers/compat/volumes.go index 4903bbad4..82e70eb90 100644 --- a/pkg/api/handlers/compat/volumes.go +++ b/pkg/api/handlers/compat/volumes.go @@ -58,10 +58,15 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { } volumeConfigs := make([]*docker_api_types.Volume, 0, len(vols)) for _, v := range vols { + mp, err := v.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } config := docker_api_types.Volume{ Name: v.Name(), Driver: v.Driver(), - Mountpoint: v.MountPoint(), + Mountpoint: mp, CreatedAt: v.CreatedTime().Format(time.RFC3339), Labels: v.Labels(), Scope: v.Scope(), @@ -106,11 +111,16 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { // if using the compat layer and the volume already exists, we // must return a 201 
with the same information as create if existingVolume != nil && !utils.IsLibpodRequest(r) { + mp, err := existingVolume.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } response := docker_api_types.Volume{ CreatedAt: existingVolume.CreatedTime().Format(time.RFC3339), Driver: existingVolume.Driver(), Labels: existingVolume.Labels(), - Mountpoint: existingVolume.MountPoint(), + Mountpoint: mp, Name: existingVolume.Name(), Options: existingVolume.Options(), Scope: existingVolume.Scope(), @@ -146,10 +156,15 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } + mp, err := vol.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } volResponse := docker_api_types.Volume{ Name: config.Name, Driver: config.Driver, - Mountpoint: config.MountPoint, + Mountpoint: mp, CreatedAt: config.CreatedTime.Format(time.RFC3339), Labels: config.Labels, Options: config.Options, @@ -173,10 +188,15 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { utils.VolumeNotFound(w, name, err) return } + mp, err := vol.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } volResponse := docker_api_types.Volume{ Name: vol.Name(), Driver: vol.Driver(), - Mountpoint: vol.MountPoint(), + Mountpoint: mp, CreatedAt: vol.CreatedTime().Format(time.RFC3339), Labels: vol.Labels(), Options: vol.Options(), diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go index 6f9537515..38fdf1b4d 100644 --- a/pkg/api/handlers/libpod/volumes.go +++ b/pkg/api/handlers/libpod/volumes.go @@ -60,20 +60,13 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - config, err := vol.Config() + inspectOut, err := vol.Inspect() if err != nil { utils.InternalServerError(w, err) return } volResponse := entities.VolumeConfigResponse{ - Name: config.Name, - Driver: config.Driver, - Mountpoint: config.MountPoint, - CreatedAt: config.CreatedTime, - Labels: config.Labels, - Options: config.Options, - UID: config.UID, - GID: config.GID, + InspectVolumeData: *inspectOut, } utils.WriteResponse(w, http.StatusCreated, volResponse) } @@ -88,27 +81,13 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { utils.VolumeNotFound(w, name, err) return } - var uid, gid int - uid, err = vol.UID() + inspectOut, err := vol.Inspect() if err != nil { - utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err) - return - } - gid, err = vol.GID() - if err != nil { - utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err) + utils.InternalServerError(w, err) return } volResponse := entities.VolumeConfigResponse{ - Name: vol.Name(), - Driver: vol.Driver(), - Mountpoint: vol.MountPoint(), - CreatedAt: vol.CreatedTime(), - Labels: vol.Labels(), - Scope: vol.Scope(), - Options: vol.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } utils.WriteResponse(w, http.StatusOK, volResponse) } @@ -143,27 +122,13 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { } volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() + inspectOut, err := v.Inspect() if err != nil { - utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err) - return - } - gid, err = v.GID() - if err != nil { - utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err) + utils.InternalServerError(w, 
err) return } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config}) } diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go index 06438f5e9..c826ee389 100644 --- a/pkg/domain/entities/volumes.go +++ b/pkg/domain/entities/volumes.go @@ -2,8 +2,8 @@ package entities import ( "net/url" - "time" + "github.com/containers/podman/v2/libpod/define" docker_api_types "github.com/docker/docker/api/types" docker_api_types_volume "github.com/docker/docker/api/types/volume" ) @@ -26,38 +26,7 @@ type IDOrNameResponse struct { } type VolumeConfigResponse struct { - // Name is the name of the volume. - Name string `json:"Name"` - // Driver is the driver used to create the volume. - // This will be properly implemented in a future version. - Driver string `json:"Driver"` - // Mountpoint is the path on the host where the volume is mounted. - Mountpoint string `json:"Mountpoint"` - // CreatedAt is the date and time the volume was created at. This is not - // stored for older Libpod volumes; if so, it will be omitted. - CreatedAt time.Time `json:"CreatedAt,omitempty"` - // Status is presently unused and provided only for Docker compatibility. - // In the future it will be used to return information on the volume's - // current state. - Status map[string]string `json:"Status,omitempty"` - // Labels includes the volume's configured labels, key:value pairs that - // can be passed during volume creation to provide information for third - // party tools. - Labels map[string]string `json:"Labels"` - // Scope is unused and provided solely for Docker compatibility. It is - // unconditionally set to "local". - Scope string `json:"Scope"` - // Options is a set of options that were used when creating the volume. - // It is presently not used. - Options map[string]string `json:"Options"` - // UID is the UID that the volume was created with. - UID int `json:"UID"` - // GID is the GID that the volume was created with. - GID int `json:"GID"` - // Anonymous indicates that the volume was created as an anonymous - // volume for a specific container, and will be be removed when any - // container using it is removed. - Anonymous bool `json:"Anonymous"` + define.InspectVolumeData } // VolumeInfo Volume list response diff --git a/pkg/domain/infra/abi/containers_stat.go b/pkg/domain/infra/abi/containers_stat.go index 5b43ee2f4..931e77026 100644 --- a/pkg/domain/infra/abi/containers_stat.go +++ b/pkg/domain/infra/abi/containers_stat.go @@ -144,16 +144,29 @@ func resolveContainerPaths(container *libpod.Container, mountPoint string, conta } if volume != nil { logrus.Debugf("Container path %q resolved to volume %q on path %q", containerPath, volume.Name(), searchPath) + + // TODO: We really need to force the volume to mount + // before doing this, but that API is not exposed + // externally right now and doing so is beyond the scope + // of this commit. + mountPoint, err := volume.MountPoint() + if err != nil { + return "", "", err + } + if mountPoint == "" { + return "", "", errors.Errorf("volume %s is not mounted, cannot copy into it", volume.Name()) + } + // We found a matching volume for searchPath. 
We now // need to first find the relative path of our input // path to the searchPath, and then join it with the // volume's mount point. pathRelativeToVolume := strings.TrimPrefix(pathRelativeToContainerMountPoint, searchPath) - absolutePathOnTheVolumeMount, err := securejoin.SecureJoin(volume.MountPoint(), pathRelativeToVolume) + absolutePathOnTheVolumeMount, err := securejoin.SecureJoin(mountPoint, pathRelativeToVolume) if err != nil { return "", "", err } - return volume.MountPoint(), absolutePathOnTheVolumeMount, nil + return mountPoint, absolutePathOnTheVolumeMount, nil } if mount := findBindMount(container, searchPath); mount != nil { diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go index 97fa9d374..f29b98696 100644 --- a/pkg/domain/infra/abi/system.go +++ b/pkg/domain/infra/abi/system.go @@ -312,7 +312,17 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System var reclaimableSize int64 for _, v := range vols { var consInUse int - volSize, err := sizeOfPath(v.MountPoint()) + mountPoint, err := v.MountPoint() + if err != nil { + return nil, err + } + if mountPoint == "" { + // We can't get any info on this volume, as it's not + // mounted. + // TODO: fix this. + continue + } + volSize, err := sizeOfPath(mountPoint) if err != nil { return nil, err } diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go index 3c9dd9fc0..823605052 100644 --- a/pkg/domain/infra/abi/volumes.go +++ b/pkg/domain/infra/abi/volumes.go @@ -103,25 +103,12 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin } reports := make([]*entities.VolumeInspectReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() - if err != nil { - return nil, nil, err - } - gid, err = v.GID() + inspectOut, err := v.Inspect() if err != nil { return nil, nil, err } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config}) } @@ -155,25 +142,12 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL } reports := make([]*entities.VolumeListReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() - if err != nil { - return nil, err - } - gid, err = v.GID() + inspectOut, err := v.Inspect() if err != nil { return nil, err } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config}) } diff --git a/test/apiv2/30-volumes.at b/test/apiv2/30-volumes.at index 33f4ea37f..b38810039 100644 --- a/test/apiv2/30-volumes.at +++ b/test/apiv2/30-volumes.at @@ -12,7 +12,7 @@ t POST libpod/volumes/create name=foo1 201 \ .Mountpoint=$volumepath/foo1/_data \ .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \ .Labels={} \ - .Options=null + .Options={} t POST libpod/volumes/create '' 201 t POST libpod/volumes/create \ '"Name":"foo2","Label":{"testlabel":"testonly"},"Options":{"type":"tmpfs","o":"nodev,noexec"}}' 201 \ diff --git a/test/apiv2/45-system.at b/test/apiv2/45-system.at index 
44cd05a13..985d86e56 100644 --- a/test/apiv2/45-system.at +++ b/test/apiv2/45-system.at @@ -19,7 +19,7 @@ t POST libpod/volumes/create name=foo1 201 \ .Mountpoint=$volumepath/foo1/_data \ .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \ .Labels={} \ - .Options=null + .Options={} t GET system/df 200 '.Volumes[0].Name=foo1' diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index abc37792a..3270ce685 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -623,7 +623,7 @@ var _ = Describe("Podman checkpoint", func() { result := podmanTest.Podman([]string{"container", "checkpoint", "-l"}) result.WaitWithDefaultTimeout() Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring("Cannot checkpoint containers that have been started with '--rm'")) + Expect(result.ErrorToString()).To(ContainSubstring("cannot checkpoint containers that have been started with '--rm'")) // Checkpointing with --export should still work fileName := "/tmp/checkpoint-" + cid + ".tar.gz" |