| author | OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com> | 2021-01-14 21:56:37 -0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2021-01-14 21:56:37 -0500 |
| commit | 8ce9995951b14a0a4d7252cdd97597411fd5f980 (patch) | |
| tree | b59fcfdfc29ff3979186116e23373c8d72f31169 | |
| parent | 2b7793b6121d336a285fb7b9a7612c221cbf63d2 (diff) | |
| parent | f781efd2dca4c1db54762c6edec2b915e48dd5d8 (diff) | |
Merge pull request #8604 from mheon/volume_plugin_impl
Initial implementation of volume plugins
34 files changed, 1029 insertions, 264 deletions
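Before the file-by-file diff, a sketch of what the feature looks like end to end. The plugin name and socket path below are illustrative; the layout mirrors the `[engine.volume_plugins]` table this patch adds to the e2e test configuration, and the CLI flag is the `--driver` option documented in the man-page changes.

```
# containers.conf (illustrative): map a plugin name to its unix socket
[engine.volume_plugins]
myplugin = "/run/docker/plugins/myplugin.sock"
```

```
$ podman volume create --driver myplugin myvol
```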
diff --git a/.gitignore b/.gitignore index f87c8974f..6a5ae509c 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ release.txt /test/checkseccomp/checkseccomp /test/copyimg/copyimg /test/goecho/goecho +/test/testvol/testvol .vscode* result # Necessary to prevent hack/tree-status.sh false-positive diff --git a/Containerfile-testvol b/Containerfile-testvol new file mode 100644 index 000000000..6ff45064b --- /dev/null +++ b/Containerfile-testvol @@ -0,0 +1,10 @@ +FROM golang:1.15-alpine AS build-img +COPY ./test/testvol/ /go/src/github.com/containers/podman/cmd/testvol/ +COPY ./vendor /go/src/github.com/containers/podman/vendor/ +WORKDIR /go/src/github.com/containers/podman +RUN go build -o /testvol ./cmd/testvol + +FROM alpine +COPY --from=build-img /testvol /usr/local/bin +WORKDIR / +ENTRYPOINT ["/usr/local/bin/testvol"] @@ -180,6 +180,14 @@ gofmt: ## Verify the source code gofmt test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go) $(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o $@ ./test/checkseccomp +.PHONY: test/testvol/testvol +test/testvol/testvol: .gopathok $(wildcard test/testvol/*.go) + $(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -o $@ ./test/testvol + +.PHONY: volume-plugin-test-image +volume-plugin-test-img: + podman build -t quay.io/libpod/volume-plugin-test-img -f Containerfile-testvol . + .PHONY: test/goecho/goecho test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go) $(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -o $@ ./test/goecho diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md index 118f024df..a06411000 100644 --- a/docs/source/markdown/podman-volume-create.1.md +++ b/docs/source/markdown/podman-volume-create.1.md @@ -17,7 +17,7 @@ driver options can be set using the **--opt** flag. #### **--driver**=*driver* -Specify the volume driver name (default local). +Specify the volume driver name (default **local**). Setting this to a value other than **local** Podman will attempt to create the volume using a volume plugin with the given name. Such plugins must be defined in the **volume_plugins** section of the **containers.conf**(5) configuration file. #### **--help** @@ -30,13 +30,14 @@ Set metadata for a volume (e.g., --label mykey=value). #### **--opt**=*option*, **-o** Set driver specific options. -For the default driver, `local`, this allows a volume to be configured to mount a filesystem on the host. +For the default driver, **local**, this allows a volume to be configured to mount a filesystem on the host. For the `local` driver the following options are supported: `type`, `device`, and `o`. The `type` option sets the type of the filesystem to be mounted, and is equivalent to the `-t` flag to **mount(8)**. The `device` option sets the device to be mounted, and is equivalent to the `device` argument to **mount(8)**. The `o` option sets options for the mount, and is equivalent to the `-o` flag to **mount(8)** with two exceptions. The `o` option supports `uid` and `gid` options to set the UID and GID of the created volume that are not normally supported by **mount(8)**. -Using volume options with the `local` driver requires root privileges. +Using volume options with the **local** driver requires root privileges. +When not using the **local** driver, the given options will be passed directly to the volume plugin. In this case, supported options will be dictated by the plugin in question, not Podman. 
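As a concrete illustration of the two `--opt` paths described above (names and values hypothetical): local-driver options are interpreted by Podman as **mount(8)** parameters and require root, while non-local options are handed to the plugin untouched.

```
# local driver: options become a mount; uid/gid are the documented extensions
$ sudo podman volume create --opt type=tmpfs --opt device=tmpfs --opt o=uid=1000,gid=1000 mytmpfs

# plugin driver: Podman does not interpret the options at all
$ podman volume create --driver myplugin --opt anyKey=anyValue myvol
```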
## EXAMPLES @@ -53,7 +54,8 @@ $ podman volume create --label foo=bar myvol ``` ## SEE ALSO -podman-volume(1), mount(8) +**podman-volume**(1), **mount**(8), **containers.conf**(5) ## HISTORY +January 2020, updated with information on volume plugins by Matthew Heon <mheon@redhat.com> November 2018, Originally compiled by Urvashi Mohnani <umohnani@redhat.com> diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index dcb2ff751..b2ee63b08 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -304,6 +304,7 @@ func (s *BoltState) Refresh() error { // Reset mount count to 0 oldState.MountCount = 0 + oldState.MountPoint = "" newState, err := json.Marshal(oldState) if err != nil { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index c06fedd3e..6014fbef3 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -497,6 +497,21 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu } } + // Retrieve volume driver + if volume.UsesVolumeDriver() { + plugin, err := s.runtime.getVolumePlugin(volume.config.Driver) + if err != nil { + // We want to fail gracefully here, to ensure that we + // can still remove volumes even if their plugin is + // missing. Otherwise, we end up with volumes that + // cannot even be retrieved from the database and will + // cause things like `volume ls` to fail. + logrus.Errorf("Volume %s uses volume plugin %s, but it cannot be accessed - some functionality may not be available: %v", volume.Name(), volume.config.Driver, err) + } else { + volume.plugin = plugin + } + } + // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID) if err != nil { diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 870d92ca9..ac7eae56b 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -212,7 +212,12 @@ func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, image return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) } mountStruct.Driver = volFromDB.Driver() - mountStruct.Source = volFromDB.MountPoint() + + mountPoint, err := volFromDB.MountPoint() + if err != nil { + return nil, err + } + mountStruct.Source = mountPoint parseMountOptionsForInspect(volume.Options, &mountStruct) diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 6d4d3ade0..b9ea50783 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1580,8 +1580,18 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) return nil, err } + // HACK HACK HACK - copy up into a volume driver is 100% broken + // right now. + if vol.UsesVolumeDriver() { + logrus.Infof("Not copying up into volume %s as it uses a volume driver", vol.Name()) + return vol, nil + } + // If the volume is not empty, we should not copy up. - volMount := vol.MountPoint() + volMount, err := vol.MountPoint() + if err != nil { + return nil, err + } contents, err := ioutil.ReadDir(volMount) if err != nil { return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID()) @@ -1619,7 +1629,11 @@ func (c *Container) chownVolume(volumeName string) error { return err } - if vol.state.NeedsChown { + // TODO: For now, I've disabled chowning volumes owned by non-Podman + // drivers. 
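The thread running through this file (and most callers below): `Volume.MountPoint()` now returns `(string, error)` instead of a bare string, and plugin-backed volumes may legitimately report an empty mount point while unmounted. A minimal sketch of the new caller contract, with `hostPathOf` being a hypothetical helper:

```go
package sketch

import "fmt"

// volume is a minimal stand-in interface; *libpod.Volume satisfies these
// accessors after this patch.
type volume interface {
	Name() string
	MountPoint() (string, error)
}

// hostPathOf is a hypothetical helper showing the new MountPoint() contract.
func hostPathOf(v volume) (string, error) {
	mountPoint, err := v.MountPoint() // may read volume state from the DB
	if err != nil {
		return "", err
	}
	if mountPoint == "" {
		// Plugin-backed volumes report "" until mounted.
		return "", fmt.Errorf("volume %s is not mounted", v.Name())
	}
	return mountPoint, nil
}
```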
This may be safe, but it's really going to be a case-by-case + // thing, I think - safest to leave disabled now and reenable later if + // there is a demand. + if vol.state.NeedsChown && !vol.UsesVolumeDriver() { vol.state.NeedsChown = false uid := int(c.config.Spec.Process.User.UID) @@ -1646,7 +1660,10 @@ func (c *Container) chownVolume(volumeName string) error { return err } - mountPoint := vol.MountPoint() + mountPoint, err := vol.MountPoint() + if err != nil { + return err + } if err := os.Lchown(mountPoint, uid, gid); err != nil { return err diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index ce2b52234..0553cc59c 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -341,7 +341,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if err != nil { return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID()) } - mountPoint := volume.MountPoint() + mountPoint, err := volume.MountPoint() + if err != nil { + return nil, err + } volMount := spec.Mount{ Type: "bind", Source: mountPoint, @@ -903,7 +906,15 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { return err } - input, err := archive.TarWithOptions(volume.MountPoint(), &archive.TarOptions{ + mp, err := volume.MountPoint() + if err != nil { + return err + } + if mp == "" { + return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name()) + } + + input, err := archive.TarWithOptions(mp, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeSourceDir: true, }) @@ -958,10 +969,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { func (c *Container) checkpointRestoreSupported() error { if !criu.CheckForCriu() { - return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion) + return errors.Errorf("checkpoint/restore requires at least CRIU %d", criu.MinCriuVersion) } if !c.ociRuntime.SupportsCheckpoint() { - return errors.Errorf("Configured runtime does not support checkpoint/restore") + return errors.Errorf("configured runtime does not support checkpoint/restore") } return nil } @@ -993,7 +1004,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } if c.AutoRemove() && options.TargetFile == "" { - return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used") + return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") } if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { @@ -1079,13 +1090,13 @@ func (c *Container) importCheckpoint(input string) error { } err = archive.Untar(archiveFile, c.bundlePath(), options) if err != nil { - return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input) + return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input) } // Make sure the newly created config.json exists on disk g := generate.Generator{Config: c.config.Spec} if err = c.saveSpec(g.Config); err != nil { - return errors.Wrap(err, "Saving imported container specification for restore failed") + return errors.Wrap(err, "saving imported container specification for restore failed") } return nil @@ -1130,7 +1141,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Let's try to stat() CRIU's inventory file. 
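The message rewrites in this file follow the Go convention that error strings start lowercase, since they are routinely wrapped into longer messages. A brief hypothetical illustration of why (`ctrID` is invented):

```go
package sketch

import "github.com/pkg/errors"

func restore(ctrID string) error {
	// Lowercase error strings compose cleanly when wrapped by callers:
	err := errors.New("configured runtime does not support checkpoint/restore")
	// Yields: `restoring container <id>: configured runtime does not support ...`
	return errors.Wrapf(err, "restoring container %s", ctrID)
}
```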
If it does not exist, it makes // no sense to try a restore. This is a minimal check if a checkpoint exist. if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) { - return errors.Wrapf(err, "A complete checkpoint for this container cannot be found, cannot restore") + return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") } if err := c.checkpointRestoreLabelLog("restore.log"); err != nil { @@ -1286,16 +1297,22 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti volumeFile, err := os.Open(volumeFilePath) if err != nil { - return errors.Wrapf(err, "Failed to open volume file %s", volumeFilePath) + return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath) } defer volumeFile.Close() volume, err := c.runtime.GetVolume(v.Name) if err != nil { - return errors.Wrapf(err, "Failed to retrieve volume %s", v.Name) + return errors.Wrapf(err, "failed to retrieve volume %s", v.Name) } - mountPoint := volume.MountPoint() + mountPoint, err := volume.MountPoint() + if err != nil { + return err + } + if mountPoint == "" { + return errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name()) + } if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil { return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint) } diff --git a/libpod/define/errors.go b/libpod/define/errors.go index 568f8e88d..d37bc397e 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -35,6 +35,10 @@ var ( // aliases. ErrNoAliases = errors.New("no aliases for container") + // ErrMissingPlugin indicates that the requested operation requires a + // plugin that is not present on the system or in the configuration. + ErrMissingPlugin = errors.New("required plugin missing") + // ErrCtrExists indicates a container with the same name or ID already // exists ErrCtrExists = errors.New("container already exists") diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go new file mode 100644 index 000000000..20602ea16 --- /dev/null +++ b/libpod/define/volume_inspect.go @@ -0,0 +1,51 @@ +package define + +import ( + "time" +) + +// InspectVolumeData is the output of Inspect() on a volume. It is matched to +// the format of 'docker volume inspect'. +type InspectVolumeData struct { + // Name is the name of the volume. + Name string `json:"Name"` + // Driver is the driver used to create the volume. + // If set to "local" or "", the Local driver (Podman built-in code) is + // used to service the volume; otherwise, a volume plugin with the given + // name is used to mount and manage the volume. + Driver string `json:"Driver"` + // Mountpoint is the path on the host where the volume is mounted. + Mountpoint string `json:"Mountpoint"` + // CreatedAt is the date and time the volume was created at. This is not + // stored for older Libpod volumes; if so, it will be omitted. + CreatedAt time.Time `json:"CreatedAt,omitempty"` + // Status is used to return information on the volume's current state, + // if the volume was created using a volume plugin (uses a Driver that + // is not the local driver). + // Status is provided to us by an external program, so no guarantees are + // made about its format or contents. Further, it is an optional field, + // so it may not be set even in cases where a volume plugin is in use. 
+ Status map[string]interface{} `json:"Status,omitempty"` + // Labels includes the volume's configured labels, key:value pairs that + // can be passed during volume creation to provide information for third + // party tools. + Labels map[string]string `json:"Labels"` + // Scope is unused and provided solely for Docker compatibility. It is + // unconditionally set to "local". + Scope string `json:"Scope"` + // Options is a set of options that were used when creating the volume. + // For the Local driver, these are mount options that will be used to + // determine how a local filesystem is mounted; they are handled as + // parameters to Mount in a manner described in the volume create + // manpage. + // For non-local drivers, these are passed as-is to the volume plugin. + Options map[string]string `json:"Options"` + // UID is the UID that the volume was created with. + UID int `json:"UID,omitempty"` + // GID is the GID that the volume was created with. + GID int `json:"GID,omitempty"` + // Anonymous indicates that the volume was created as an anonymous + // volume for a specific container, and will be be removed when any + // container using it is removed. + Anonymous bool `json:"Anonymous,omitempty"` +} diff --git a/libpod/options.go b/libpod/options.go index 31c0b9ac9..c7bac7e1f 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -1549,17 +1549,6 @@ func WithVolumeDriver(driver string) VolumeCreateOption { return define.ErrVolumeFinalized } - // Uncomment when volume plugins are ready for use. - // if driver != define.VolumeDriverLocal { - // if _, err := plugin.GetVolumePlugin(driver); err != nil { - // return err - // } - // } - - if driver != define.VolumeDriverLocal { - return define.ErrNotImplemented - } - volume.config.Driver = driver return nil } diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go index 2500a4f36..c5dec651c 100644 --- a/libpod/plugin/volume_api.go +++ b/libpod/plugin/volume_api.go @@ -2,8 +2,9 @@ package plugin import ( "bytes" - "fmt" + "context" "io/ioutil" + "net" "net/http" "os" "path/filepath" @@ -43,7 +44,6 @@ var ( const ( defaultTimeout = 5 * time.Second - defaultPath = "/run/docker/plugins" volumePluginType = "VolumeDriver" ) @@ -64,6 +64,8 @@ type VolumePlugin struct { Name string // SocketPath is the unix socket at which the plugin is accessed. SocketPath string + // Client is the HTTP client we use to connect to the plugin. + Client *http.Client } // This is the response from the activate endpoint of the API. @@ -76,7 +78,7 @@ type activateResponse struct { func validatePlugin(newPlugin *VolumePlugin) error { // It's a socket. Is it a plugin? 
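For context on the handshake validated just below: a volume plugin is an HTTP server on a unix socket that answers `POST /Plugin.Activate` with the subsystems it implements. A minimal hypothetical plugin side, using only the standard library (real plugins would normally build on github.com/docker/go-plugins-helpers/volume, which this patch uses on the client side; the socket path is illustrative):

```go
package main

import (
	"net"
	"net/http"
)

func main() {
	// Podman learns this path from the volume_plugins table in containers.conf.
	l, err := net.Listen("unix", "/run/docker/plugins/myplugin.sock")
	if err != nil {
		panic(err)
	}
	http.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.1+json")
		// Claiming "VolumeDriver" is what validatePlugin checks for.
		w.Write([]byte(`{"Implements": ["VolumeDriver"]}`))
	})
	http.Serve(l, nil)
}
```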
// Hit the Activate endpoint to find out if it is, and if so what kind - req, err := http.NewRequest("POST", activatePath, nil) + req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil) if err != nil { return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name) } @@ -84,9 +86,7 @@ func validatePlugin(newPlugin *VolumePlugin) error { req.Header.Set("Host", newPlugin.getURI()) req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1) - client := new(http.Client) - client.Timeout = defaultTimeout - resp, err := client.Do(req) + resp, err := newPlugin.Client.Do(req) if err != nil { return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name) } @@ -121,22 +121,28 @@ func validatePlugin(newPlugin *VolumePlugin) error { return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", ")) } + if plugins == nil { + plugins = make(map[string]*VolumePlugin) + } + plugins[newPlugin.Name] = newPlugin return nil } -// GetVolumePlugin gets a single volume plugin by path. -// TODO: We should not be auto-completing based on a default path; we should -// require volumes to have been pre-specified in containers.conf (will need a -// function to pre-populate the plugins list, and we should probably do a lazy -// initialization there to not slow things down too much). -func GetVolumePlugin(name string) (*VolumePlugin, error) { +// GetVolumePlugin gets a single volume plugin, with the given name, at the +// given path. +func GetVolumePlugin(name string, path string) (*VolumePlugin, error) { pluginsLock.Lock() defer pluginsLock.Unlock() plugin, exists := plugins[name] if exists { + // This shouldn't be possible, but just in case... + if plugin.SocketPath != filepath.Clean(path) { + return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath) + } + return plugin, nil } @@ -144,7 +150,20 @@ func GetVolumePlugin(name string) (*VolumePlugin, error) { newPlugin := new(VolumePlugin) newPlugin.Name = name - newPlugin.SocketPath = filepath.Join(defaultPath, fmt.Sprintf("%s.sock", name)) + newPlugin.SocketPath = filepath.Clean(path) + + // Need an HTTP client to force a Unix connection. + // And since we can reuse it, might as well cache it. + client := new(http.Client) + client.Timeout = defaultTimeout + // This bit borrowed from pkg/bindings/connection.go + client.Transport = &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", newPlugin.SocketPath) + }, + DisableCompression: true, + } + newPlugin.Client = client stat, err := os.Stat(newPlugin.SocketPath) if err != nil { @@ -183,6 +202,7 @@ func (p *VolumePlugin) verifyReachable() error { } // Send a request to the volume plugin for handling. +// Callers *MUST* close the response when they are done. 
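A standalone sketch of the client pattern introduced above: a per-plugin `http.Client` whose transport always dials the plugin's socket (the `http://plugin` host is a placeholder the dialer ignores), plus the body-closing duty that now falls on callers. Socket path and endpoint payload are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"strings"
	"time"
)

func main() {
	sock := "/run/docker/plugins/myplugin.sock" // illustrative
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Ignore the requested host/port; always dial the unix socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
			DisableCompression: true,
		},
	}
	resp, err := client.Post("http://plugin/VolumeDriver.List",
		"application/vnd.docker.plugins.v1.1+json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() // sendRequest deliberately leaves this to the caller
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```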
func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) { var ( reqJSON []byte @@ -196,7 +216,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st } } - req, err := http.NewRequest("POST", endpoint, bytes.NewReader(reqJSON)) + req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON)) if err != nil { return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint) } @@ -204,13 +224,12 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st req.Header.Set("Host", p.getURI()) req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1) - client := new(http.Client) - client.Timeout = defaultTimeout - resp, err := client.Do(req) + resp, err := p.Client.Do(req) if err != nil { return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint) } - defer resp.Body.Close() + // We are *deliberately not closing* response here. It is the + // responsibility of the caller to do so after reading the response. return resp, nil } diff --git a/libpod/runtime.go b/libpod/runtime.go index 1004e4fa7..34c737a67 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -17,6 +17,7 @@ import ( "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/libpod/lock" + "github.com/containers/podman/v2/libpod/plugin" "github.com/containers/podman/v2/libpod/shutdown" "github.com/containers/podman/v2/pkg/cgroups" "github.com/containers/podman/v2/pkg/registries" @@ -888,3 +889,18 @@ func (r *Runtime) reloadStorageConf() error { logrus.Infof("applied new storage configuration: %v", r.storageConfig) return nil } + +// getVolumePlugin gets a specific volume plugin given its name. +func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) { + // There is no plugin for local. + if name == define.VolumeDriverLocal || name == "" { + return nil, nil + } + + pluginPath, ok := r.config.Engine.VolumePlugins[name] + if !ok { + return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name) + } + + return plugin.GetVolumePlugin(name, pluginPath) +} diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index 9bf0fd108..4a29f01aa 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -11,7 +11,9 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" + volplugin "github.com/containers/podman/v2/libpod/plugin" "github.com/containers/storage/pkg/stringid" + pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -53,6 +55,14 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name) } + // Plugin can be nil if driver is local, but that's OK - superfluous + // assignment doesn't hurt much. 
+ plugin, err := r.getVolumePlugin(volume.config.Driver) + if err != nil { + return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver) + } + volume.plugin = plugin + if volume.config.Driver == define.VolumeDriverLocal { logrus.Debugf("Validating options for local driver") // Validate options @@ -66,25 +76,38 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } } - // Create the mountpoint of this volume - volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) - if err := os.MkdirAll(volPathRoot, 0700); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) - } - if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) - } - fullVolPath := filepath.Join(volPathRoot, "_data") - if err := os.MkdirAll(fullVolPath, 0755); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath) - } - if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) - } - if err := LabelVolumePath(fullVolPath); err != nil { - return nil, err + // Now we get conditional: we either need to make the volume in the + // volume plugin, or on disk if not using a plugin. + if volume.plugin != nil { + // We can't chown, or relabel, or similar the path the volume is + // using, because it's not managed by us. + // TODO: reevaluate this once we actually have volume plugins in + // use in production - it may be safe, but I can't tell without + // knowing what the actual plugin does... + if err := makeVolumeInPluginIfNotExist(volume.config.Name, volume.config.Options, volume.plugin); err != nil { + return nil, err + } + } else { + // Create the mountpoint of this volume + volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) + if err := os.MkdirAll(volPathRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) + } + if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil { + return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) + } + fullVolPath := filepath.Join(volPathRoot, "_data") + if err := os.MkdirAll(fullVolPath, 0755); err != nil { + return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath) + } + if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil { + return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) + } + if err := LabelVolumePath(fullVolPath); err != nil { + return nil, err + } + volume.config.MountPoint = fullVolPath } - volume.config.MountPoint = fullVolPath lock, err := r.lockManager.AllocateLock() if err != nil { @@ -111,6 +134,39 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) return volume, nil } +// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it +// does not already exist. 
+func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error { + // Ping the volume plugin to see if it exists first. + // If it does, use the existing volume in the plugin. + // Options may not match exactly, but not much we can do about + // that. Not complaining avoids a lot of the sync issues we see + // with c/storage and libpod DB. + needsCreate := true + getReq := new(pluginapi.GetRequest) + getReq.Name = name + if resp, err := plugin.GetVolume(getReq); err == nil { + // TODO: What do we do if we get a 200 response, but the + // Volume is nil? The docs on the Plugin API are very + // nonspecific, so I don't know if this is valid or + // not... + if resp != nil { + needsCreate = false + logrus.Infof("Volume %q already exists in plugin %q, using existing volume", name, plugin.Name) + } + } + if needsCreate { + createReq := new(pluginapi.CreateRequest) + createReq.Name = name + createReq.Options = options + if err := plugin.CreateVolume(createReq); err != nil { + return errors.Wrapf(err, "error creating volume %q in plugin %s", name, plugin.Name) + } + } + + return nil +} + // removeVolume removes the specified volume from state as well tears down its mountpoint and storage func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error { if !v.valid { @@ -185,9 +241,43 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error var removalErr error + // If we use a volume plugin, we need to remove from the plugin. + if v.UsesVolumeDriver() { + canRemove := true + + // Do we have a volume driver? + if v.plugin == nil { + canRemove = false + removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } else { + // Ping the plugin first to verify the volume still + // exists. + // We're trying to be very tolerant of missing volumes + // in the backend, to avoid the problems we see with + // sync between c/storage and the Libpod DB. 
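The Get-then-Create (and, during removal, Get-then-Remove) calls above map onto the Docker volume plugin HTTP API, which can be poked at with curl when debugging a plugin. Socket path and volume name are illustrative:

```
$ curl --unix-socket /run/docker/plugins/testvol0.sock \
       -H 'Content-Type: application/vnd.docker.plugins.v1.1+json' \
       -d '{"Name":"myvol"}' http://plugin/VolumeDriver.Get
$ curl --unix-socket /run/docker/plugins/testvol0.sock \
       -d '{"Name":"myvol","Opts":{}}' http://plugin/VolumeDriver.Create
$ curl --unix-socket /run/docker/plugins/testvol0.sock \
       -d '{"Name":"myvol"}' http://plugin/VolumeDriver.Remove
```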
+ getReq := new(pluginapi.GetRequest) + getReq.Name = v.Name() + if _, err := v.plugin.GetVolume(getReq); err != nil { + canRemove = false + removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } + } + if canRemove { + req := new(pluginapi.RemoveRequest) + req.Name = v.Name() + if err := v.plugin.RemoveVolume(req); err != nil { + removalErr = errors.Wrapf(err, "volume %s could not be removed from plugin %s, but it has been removed from Podman", v.Name(), v.Driver()) + } + } + } + // Free the volume's lock if err := v.lock.Free(); err != nil { - removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + } else { + logrus.Errorf("Error freeing lock for volume %q: %v", v.Name(), err) + } } // Delete the mountpoint path of the volume, that is delete the volume @@ -196,7 +286,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error if removalErr == nil { removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) } else { - logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err) + logrus.Errorf("Error cleaning up volume storage for volume %q: %v", v.Name(), err) } } diff --git a/libpod/volume.go b/libpod/volume.go index ed08d375f..4c137cb8e 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -7,6 +7,7 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/lock" + "github.com/containers/podman/v2/libpod/plugin" ) // Volume is a libpod named volume. @@ -18,6 +19,7 @@ type Volume struct { state *VolumeState valid bool + plugin *plugin.VolumePlugin runtime *Runtime lock lock.Locker } @@ -31,7 +33,7 @@ type VolumeConfig struct { // Labels for the volume. Labels map[string]string `json:"labels"` // The volume driver. Empty string or local does not activate a volume - // driver, all other volumes will. + // driver, all other values will. Driver string `json:"volumeDriver"` // The location the volume is mounted at. MountPoint string `json:"mountPoint"` @@ -53,6 +55,10 @@ type VolumeConfig struct { // Volumes are not guaranteed to have a state. Only volumes using the Local // driver that have mount options set will create a state. type VolumeState struct { + // Mountpoint is the location where the volume was mounted. + // This is only used for volumes using a volume plugin, which will mount + // at non-standard locations. + MountPoint string `json:"mountPoint,omitempty"` // MountCount is the number of times this volume has been requested to // be mounted. // It is incremented on mount() and decremented on unmount(). @@ -115,8 +121,20 @@ func (v *Volume) Labels() map[string]string { } // MountPoint returns the volume's mountpoint on the host -func (v *Volume) MountPoint() string { - return v.config.MountPoint +func (v *Volume) MountPoint() (string, error) { + // For the sake of performance, avoid locking unless we have to. 
+ if v.UsesVolumeDriver() { + v.lock.Lock() + defer v.lock.Unlock() + + if err := v.update(); err != nil { + return "", err + } + + return v.state.MountPoint, nil + } + + return v.config.MountPoint, nil } // Options return the volume's options @@ -139,14 +157,19 @@ func (v *Volume) UID() (int, error) { v.lock.Lock() defer v.lock.Unlock() - if !v.valid { - return -1, define.ErrVolumeRemoved + if err := v.update(); err != nil { + return -1, err } + return v.uid(), nil +} + +// Internal, unlocked accessor for UID. +func (v *Volume) uid() int { if v.state.UIDChowned > 0 { - return v.state.UIDChowned, nil + return v.state.UIDChowned } - return v.config.UID, nil + return v.config.UID } // GID returns the GID the volume will be created as. @@ -154,14 +177,19 @@ func (v *Volume) GID() (int, error) { v.lock.Lock() defer v.lock.Unlock() - if !v.valid { - return -1, define.ErrVolumeRemoved + if err := v.update(); err != nil { + return -1, err } + return v.gid(), nil +} + +// Internal, unlocked accessor for GID. +func (v *Volume) gid() int { if v.state.GIDChowned > 0 { - return v.state.GIDChowned, nil + return v.state.GIDChowned } - return v.config.GID, nil + return v.config.GID } // CreatedTime returns the time the volume was created at. It was not tracked @@ -198,3 +226,10 @@ func (v *Volume) IsDangling() (bool, error) { } return len(ctrs) == 0, nil } + +// UsesVolumeDriver determines whether the volume uses a volume driver. Volume +// drivers are pluggable backends for volumes that will manage the storage and +// mounting. +func (v *Volume) UsesVolumeDriver() bool { + return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "") +} diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go index c8b20b8f1..2448d1bb5 100644 --- a/libpod/volume_inspect.go +++ b/libpod/volume_inspect.go @@ -1,60 +1,52 @@ package libpod import ( - "time" - "github.com/containers/podman/v2/libpod/define" + pluginapi "github.com/docker/go-plugins-helpers/volume" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// InspectVolumeData is the output of Inspect() on a volume. It is matched to -// the format of 'docker volume inspect'. -type InspectVolumeData struct { - // Name is the name of the volume. - Name string `json:"Name"` - // Driver is the driver used to create the volume. - // This will be properly implemented in a future version. - Driver string `json:"Driver"` - // Mountpoint is the path on the host where the volume is mounted. - Mountpoint string `json:"Mountpoint"` - // CreatedAt is the date and time the volume was created at. This is not - // stored for older Libpod volumes; if so, it will be omitted. - CreatedAt time.Time `json:"CreatedAt,omitempty"` - // Status is presently unused and provided only for Docker compatibility. - // In the future it will be used to return information on the volume's - // current state. - Status map[string]string `json:"Status,omitempty"` - // Labels includes the volume's configured labels, key:value pairs that - // can be passed during volume creation to provide information for third - // party tools. - Labels map[string]string `json:"Labels"` - // Scope is unused and provided solely for Docker compatibility. It is - // unconditionally set to "local". - Scope string `json:"Scope"` - // Options is a set of options that were used when creating the volume. - // It is presently not used. - Options map[string]string `json:"Options"` - // UID is the UID that the volume was created with. 
- UID int `json:"UID,omitempty"` - // GID is the GID that the volume was created with. - GID int `json:"GID,omitempty"` - // Anonymous indicates that the volume was created as an anonymous - // volume for a specific container, and will be be removed when any - // container using it is removed. - Anonymous bool `json:"Anonymous,omitempty"` -} - // Inspect provides detailed information about the configuration of the given // volume. -func (v *Volume) Inspect() (*InspectVolumeData, error) { +func (v *Volume) Inspect() (*define.InspectVolumeData, error) { if !v.valid { return nil, define.ErrVolumeRemoved } - data := new(InspectVolumeData) + v.lock.Lock() + defer v.lock.Unlock() + + if err := v.update(); err != nil { + return nil, err + } + + data := new(define.InspectVolumeData) + + data.Mountpoint = v.config.MountPoint + if v.UsesVolumeDriver() { + logrus.Debugf("Querying volume plugin %s for status", v.config.Driver) + data.Mountpoint = v.state.MountPoint + + if v.plugin == nil { + return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver) + } + + // Retrieve status for the volume. + // Need to query the volume driver. + req := new(pluginapi.GetRequest) + req.Name = v.Name() + resp, err := v.plugin.GetVolume(req) + if err != nil { + return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver()) + } + if resp != nil { + data.Status = resp.Status + } + } data.Name = v.config.Name data.Driver = v.config.Driver - data.Mountpoint = v.config.MountPoint data.CreatedAt = v.config.CreatedTime data.Labels = make(map[string]string) for k, v := range v.config.Labels { @@ -65,15 +57,8 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) { for k, v := range v.config.Options { data.Options[k] = v } - var err error - data.UID, err = v.UID() - if err != nil { - return nil, err - } - data.GID, err = v.GID() - if err != nil { - return nil, err - } + data.UID = v.uid() + data.GID = v.gid() data.Anonymous = v.config.IsAnon return data, nil diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 95cb752e0..88d940370 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -22,13 +22,24 @@ func newVolume(runtime *Runtime) *Volume { // teardownStorage deletes the volume from volumePath func (v *Volume) teardownStorage() error { + if v.UsesVolumeDriver() { + return nil + } + + // TODO: Should this be converted to use v.config.MountPoint? return os.RemoveAll(filepath.Join(v.runtime.config.Engine.VolumePath, v.Name())) } // Volumes with options set, or a filesystem type, or a device to mount need to // be mounted and unmounted. func (v *Volume) needsMount() bool { - return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal + // Non-local driver always needs mount + if v.UsesVolumeDriver() { + return true + } + + // Local driver with options needs mount + return len(v.config.Options) > 0 } // update() updates the volume state from the DB. 
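The next file hardcodes a pseudo-container ID for plugin Mount/Unmount requests, documented in its comment as the sha256 of the string "placeholder\n". A quick standalone check of that claim:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("placeholder\n"))
	// Per the source comment, this should print
	// 2f73349cfc4630255319c6c8dfc1b46a8996ace9d14d8e07563b165915918ec2
	fmt.Printf("%x\n", sum)
}
```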
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go index bbf47f124..e184505e7 100644 --- a/libpod/volume_internal_linux.go +++ b/libpod/volume_internal_linux.go @@ -8,11 +8,17 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/rootless" + pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) +// This is a pseudo-container ID to use when requesting a mount or unmount from +// the volume plugins. +// This is the shas256 of the string "placeholder\n". +const pseudoCtrID = "2f73349cfc4630255319c6c8dfc1b46a8996ace9d14d8e07563b165915918ec2" + // mount mounts the volume if necessary. // A mount is necessary if a volume has any options set. // If a mount is necessary, v.state.MountCount will be incremented. @@ -20,7 +26,7 @@ import ( // host. Otherwise, we assume it is already mounted. // Must be done while the volume is locked. // Is a no-op on volumes that do not require a mount (as defined by -// volumeNeedsMount()) +// volumeNeedsMount()). func (v *Volume) mount() error { if !v.needsMount() { return nil @@ -44,6 +50,28 @@ func (v *Volume) mount() error { return v.save() } + // Volume plugins implement their own mount counter, based on the ID of + // the mounting container. But we already have one, and honestly I trust + // ours more. So hardcode container ID to something reasonable, and use + // the same one for everything. + if v.UsesVolumeDriver() { + if v.plugin == nil { + return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name()) + } + + req := new(pluginapi.MountRequest) + req.Name = v.Name() + req.ID = pseudoCtrID + mountPoint, err := v.plugin.MountVolume(req) + if err != nil { + return err + } + + v.state.MountCount += 1 + v.state.MountPoint = mountPoint + return v.save() + } + volDevice := v.config.Options["device"] volType := v.config.Options["type"] volOptions := v.config.Options["o"] @@ -132,6 +160,22 @@ func (v *Volume) unmount(force bool) error { logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) if v.state.MountCount == 0 { + if v.UsesVolumeDriver() { + if v.plugin == nil { + return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name()) + } + + req := new(pluginapi.UnmountRequest) + req.Name = v.Name() + req.ID = pseudoCtrID + if err := v.plugin.UnmountVolume(req); err != nil { + return err + } + + v.state.MountPoint = "" + return v.save() + } + // Unmount the volume if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil { if err == unix.EINVAL { diff --git a/pkg/api/handlers/compat/volumes.go b/pkg/api/handlers/compat/volumes.go index 4903bbad4..82e70eb90 100644 --- a/pkg/api/handlers/compat/volumes.go +++ b/pkg/api/handlers/compat/volumes.go @@ -58,10 +58,15 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { } volumeConfigs := make([]*docker_api_types.Volume, 0, len(vols)) for _, v := range vols { + mp, err := v.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } config := docker_api_types.Volume{ Name: v.Name(), Driver: v.Driver(), - Mountpoint: v.MountPoint(), + Mountpoint: mp, CreatedAt: v.CreatedTime().Format(time.RFC3339), Labels: v.Labels(), Scope: v.Scope(), @@ -106,11 +111,16 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { // if using the compat layer and the volume already exists, we // must return a 201 
with the same information as create if existingVolume != nil && !utils.IsLibpodRequest(r) { + mp, err := existingVolume.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } response := docker_api_types.Volume{ CreatedAt: existingVolume.CreatedTime().Format(time.RFC3339), Driver: existingVolume.Driver(), Labels: existingVolume.Labels(), - Mountpoint: existingVolume.MountPoint(), + Mountpoint: mp, Name: existingVolume.Name(), Options: existingVolume.Options(), Scope: existingVolume.Scope(), @@ -146,10 +156,15 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } + mp, err := vol.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } volResponse := docker_api_types.Volume{ Name: config.Name, Driver: config.Driver, - Mountpoint: config.MountPoint, + Mountpoint: mp, CreatedAt: config.CreatedTime.Format(time.RFC3339), Labels: config.Labels, Options: config.Options, @@ -173,10 +188,15 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { utils.VolumeNotFound(w, name, err) return } + mp, err := vol.MountPoint() + if err != nil { + utils.InternalServerError(w, err) + return + } volResponse := docker_api_types.Volume{ Name: vol.Name(), Driver: vol.Driver(), - Mountpoint: vol.MountPoint(), + Mountpoint: mp, CreatedAt: vol.CreatedTime().Format(time.RFC3339), Labels: vol.Labels(), Options: vol.Options(), diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go index 6f9537515..38fdf1b4d 100644 --- a/pkg/api/handlers/libpod/volumes.go +++ b/pkg/api/handlers/libpod/volumes.go @@ -60,20 +60,13 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - config, err := vol.Config() + inspectOut, err := vol.Inspect() if err != nil { utils.InternalServerError(w, err) return } volResponse := entities.VolumeConfigResponse{ - Name: config.Name, - Driver: config.Driver, - Mountpoint: config.MountPoint, - CreatedAt: config.CreatedTime, - Labels: config.Labels, - Options: config.Options, - UID: config.UID, - GID: config.GID, + InspectVolumeData: *inspectOut, } utils.WriteResponse(w, http.StatusCreated, volResponse) } @@ -88,27 +81,13 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { utils.VolumeNotFound(w, name, err) return } - var uid, gid int - uid, err = vol.UID() + inspectOut, err := vol.Inspect() if err != nil { - utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err) - return - } - gid, err = vol.GID() - if err != nil { - utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err) + utils.InternalServerError(w, err) return } volResponse := entities.VolumeConfigResponse{ - Name: vol.Name(), - Driver: vol.Driver(), - Mountpoint: vol.MountPoint(), - CreatedAt: vol.CreatedTime(), - Labels: vol.Labels(), - Scope: vol.Scope(), - Options: vol.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } utils.WriteResponse(w, http.StatusOK, volResponse) } @@ -143,27 +122,13 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { } volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() + inspectOut, err := v.Inspect() if err != nil { - utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err) - return - } - gid, err = v.GID() - if err != nil { - utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err) + utils.InternalServerError(w, 
err) return } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config}) } diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go index 06438f5e9..c826ee389 100644 --- a/pkg/domain/entities/volumes.go +++ b/pkg/domain/entities/volumes.go @@ -2,8 +2,8 @@ package entities import ( "net/url" - "time" + "github.com/containers/podman/v2/libpod/define" docker_api_types "github.com/docker/docker/api/types" docker_api_types_volume "github.com/docker/docker/api/types/volume" ) @@ -26,38 +26,7 @@ type IDOrNameResponse struct { } type VolumeConfigResponse struct { - // Name is the name of the volume. - Name string `json:"Name"` - // Driver is the driver used to create the volume. - // This will be properly implemented in a future version. - Driver string `json:"Driver"` - // Mountpoint is the path on the host where the volume is mounted. - Mountpoint string `json:"Mountpoint"` - // CreatedAt is the date and time the volume was created at. This is not - // stored for older Libpod volumes; if so, it will be omitted. - CreatedAt time.Time `json:"CreatedAt,omitempty"` - // Status is presently unused and provided only for Docker compatibility. - // In the future it will be used to return information on the volume's - // current state. - Status map[string]string `json:"Status,omitempty"` - // Labels includes the volume's configured labels, key:value pairs that - // can be passed during volume creation to provide information for third - // party tools. - Labels map[string]string `json:"Labels"` - // Scope is unused and provided solely for Docker compatibility. It is - // unconditionally set to "local". - Scope string `json:"Scope"` - // Options is a set of options that were used when creating the volume. - // It is presently not used. - Options map[string]string `json:"Options"` - // UID is the UID that the volume was created with. - UID int `json:"UID"` - // GID is the GID that the volume was created with. - GID int `json:"GID"` - // Anonymous indicates that the volume was created as an anonymous - // volume for a specific container, and will be be removed when any - // container using it is removed. - Anonymous bool `json:"Anonymous"` + define.InspectVolumeData } // VolumeInfo Volume list response diff --git a/pkg/domain/infra/abi/containers_stat.go b/pkg/domain/infra/abi/containers_stat.go index 5b43ee2f4..931e77026 100644 --- a/pkg/domain/infra/abi/containers_stat.go +++ b/pkg/domain/infra/abi/containers_stat.go @@ -144,16 +144,29 @@ func resolveContainerPaths(container *libpod.Container, mountPoint string, conta } if volume != nil { logrus.Debugf("Container path %q resolved to volume %q on path %q", containerPath, volume.Name(), searchPath) + + // TODO: We really need to force the volume to mount + // before doing this, but that API is not exposed + // externally right now and doing so is beyond the scope + // of this commit. + mountPoint, err := volume.MountPoint() + if err != nil { + return "", "", err + } + if mountPoint == "" { + return "", "", errors.Errorf("volume %s is not mounted, cannot copy into it", volume.Name()) + } + // We found a matching volume for searchPath. 
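A standalone sketch of the SecureJoin step used here: joining an untrusted, container-relative path onto the volume's mount point so that `..` components cannot escape it. The mount point value is illustrative.

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	mountPoint := "/var/lib/containers/storage/volumes/myvol/_data" // illustrative
	// Even a hostile relative path resolves to something under mountPoint.
	resolved, err := securejoin.SecureJoin(mountPoint, "../../../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved)
}
```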
We now // need to first find the relative path of our input // path to the searchPath, and then join it with the // volume's mount point. pathRelativeToVolume := strings.TrimPrefix(pathRelativeToContainerMountPoint, searchPath) - absolutePathOnTheVolumeMount, err := securejoin.SecureJoin(volume.MountPoint(), pathRelativeToVolume) + absolutePathOnTheVolumeMount, err := securejoin.SecureJoin(mountPoint, pathRelativeToVolume) if err != nil { return "", "", err } - return volume.MountPoint(), absolutePathOnTheVolumeMount, nil + return mountPoint, absolutePathOnTheVolumeMount, nil } if mount := findBindMount(container, searchPath); mount != nil { diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go index 97fa9d374..f29b98696 100644 --- a/pkg/domain/infra/abi/system.go +++ b/pkg/domain/infra/abi/system.go @@ -312,7 +312,17 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System var reclaimableSize int64 for _, v := range vols { var consInUse int - volSize, err := sizeOfPath(v.MountPoint()) + mountPoint, err := v.MountPoint() + if err != nil { + return nil, err + } + if mountPoint == "" { + // We can't get any info on this volume, as it's not + // mounted. + // TODO: fix this. + continue + } + volSize, err := sizeOfPath(mountPoint) if err != nil { return nil, err } diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go index 3c9dd9fc0..823605052 100644 --- a/pkg/domain/infra/abi/volumes.go +++ b/pkg/domain/infra/abi/volumes.go @@ -103,25 +103,12 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin } reports := make([]*entities.VolumeInspectReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() - if err != nil { - return nil, nil, err - } - gid, err = v.GID() + inspectOut, err := v.Inspect() if err != nil { return nil, nil, err } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config}) } @@ -155,25 +142,12 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL } reports := make([]*entities.VolumeListReport, 0, len(vols)) for _, v := range vols { - var uid, gid int - uid, err = v.UID() - if err != nil { - return nil, err - } - gid, err = v.GID() + inspectOut, err := v.Inspect() if err != nil { return nil, err } config := entities.VolumeConfigResponse{ - Name: v.Name(), - Driver: v.Driver(), - Mountpoint: v.MountPoint(), - CreatedAt: v.CreatedTime(), - Labels: v.Labels(), - Scope: v.Scope(), - Options: v.Options(), - UID: uid, - GID: gid, + InspectVolumeData: *inspectOut, } reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config}) } diff --git a/test/apiv2/30-volumes.at b/test/apiv2/30-volumes.at index 33f4ea37f..b38810039 100644 --- a/test/apiv2/30-volumes.at +++ b/test/apiv2/30-volumes.at @@ -12,7 +12,7 @@ t POST libpod/volumes/create name=foo1 201 \ .Mountpoint=$volumepath/foo1/_data \ .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \ .Labels={} \ - .Options=null + .Options={} t POST libpod/volumes/create '' 201 t POST libpod/volumes/create \ '"Name":"foo2","Label":{"testlabel":"testonly"},"Options":{"type":"tmpfs","o":"nodev,noexec"}}' 201 \ diff --git a/test/apiv2/45-system.at b/test/apiv2/45-system.at index 
44cd05a13..985d86e56 100644 --- a/test/apiv2/45-system.at +++ b/test/apiv2/45-system.at @@ -19,7 +19,7 @@ t POST libpod/volumes/create name=foo1 201 \ .Mountpoint=$volumepath/foo1/_data \ .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \ .Labels={} \ - .Options=null + .Options={} t GET system/df 200 '.Volumes[0].Name=foo1' diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index abc37792a..3270ce685 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -623,7 +623,7 @@ var _ = Describe("Podman checkpoint", func() { result := podmanTest.Podman([]string{"container", "checkpoint", "-l"}) result.WaitWithDefaultTimeout() Expect(result).To(ExitWithError()) - Expect(result.ErrorToString()).To(ContainSubstring("Cannot checkpoint containers that have been started with '--rm'")) + Expect(result.ErrorToString()).To(ContainSubstring("cannot checkpoint containers that have been started with '--rm'")) // Checkpointing with --export should still work fileName := "/tmp/checkpoint-" + cid + ".tar.gz" diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index 18679dd53..2668b1e7b 100644 --- a/test/e2e/common_test.go +++ b/test/e2e/common_test.go @@ -122,7 +122,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { } // Pull cirros but don't put it into the cache - pullImages := []string{cirros, fedoraToolbox} + pullImages := []string{cirros, fedoraToolbox, volumeTest} pullImages = append(pullImages, CACHE_IMAGES...) for _, image := range pullImages { podman.createArtifact(image) @@ -483,13 +483,7 @@ func (p *PodmanTestIntegration) CleanupVolume() { session := p.Podman([]string{"volume", "rm", "-fa"}) session.Wait(90) - // Stop remove service on volume cleanup - p.StopRemoteService() - - // Nuke tempdir - if err := os.RemoveAll(p.TempDir); err != nil { - fmt.Printf("%q\n", err) - } + p.Cleanup() } // InspectContainerToJSON takes the session output of an inspect diff --git a/test/e2e/config.go b/test/e2e/config.go index e66cd6846..2552595ad 100644 --- a/test/e2e/config.go +++ b/test/e2e/config.go @@ -15,6 +15,7 @@ var ( healthcheck = "quay.io/libpod/alpine_healthcheck:latest" ImageCacheDir = "/tmp/podman/imagecachedir" fedoraToolbox = "registry.fedoraproject.org/f32/fedora-toolbox:latest" + volumeTest = "quay.io/libpod/volume-plugin-test-img:latest" // This image has seccomp profiles that blocks all syscalls. // The intention behind blocking all syscalls is to prevent diff --git a/test/e2e/config/containers.conf b/test/e2e/config/containers.conf index 35153ba05..5a5e4b7a5 100644 --- a/test/e2e/config/containers.conf +++ b/test/e2e/config/containers.conf @@ -56,3 +56,17 @@ umask = "0002" [engine] network_cmd_options=["allow_host_loopback=true"] + +# We need to ensure each test runs on a separate plugin instance... +# For now, let's just make a bunch of plugin paths and have each test use one. 
+[engine.volume_plugins] +testvol0 = "/run/docker/plugins/testvol0.sock" +testvol1 = "/run/docker/plugins/testvol1.sock" +testvol2 = "/run/docker/plugins/testvol2.sock" +testvol3 = "/run/docker/plugins/testvol3.sock" +testvol4 = "/run/docker/plugins/testvol4.sock" +testvol5 = "/run/docker/plugins/testvol5.sock" +testvol6 = "/run/docker/plugins/testvol6.sock" +testvol7 = "/run/docker/plugins/testvol7.sock" +testvol8 = "/run/docker/plugins/testvol8.sock" +testvol9 = "/run/docker/plugins/testvol9.sock" diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go new file mode 100644 index 000000000..16edab27c --- /dev/null +++ b/test/e2e/volume_plugin_test.go @@ -0,0 +1,184 @@ +package integration + +import ( + "fmt" + "os" + "path/filepath" + + . "github.com/containers/podman/v2/test/utils" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Podman volume plugins", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + podmanTest.SeedImages() + os.Setenv("CONTAINERS_CONF", "config/containers.conf") + SkipIfRemote("Volume plugins only supported as local") + SkipIfRootless("Root is required for volume plugin testing") + os.MkdirAll("/run/docker/plugins", 0755) + }) + + AfterEach(func() { + podmanTest.CleanupVolume() + f := CurrentGinkgoTestDescription() + processTestResult(f) + os.Unsetenv("CONTAINERS_CONF") + }) + + It("volume create with nonexistent plugin errors", func() { + session := podmanTest.Podman([]string{"volume", "create", "--driver", "notexist", "test_volume_name"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + }) + + It("volume create with not-running plugin does not error", func() { + session := podmanTest.Podman([]string{"volume", "create", "--driver", "testvol0", "test_volume_name"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + }) + + It("volume create and remove with running plugin succeeds", func() { + podmanTest.AddImageToRWStore(volumeTest) + + pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes") + os.Mkdir(pluginStatePath, 0755) + + // Keep this distinct within tests to avoid multiple tests using the same plugin. 
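Distilled from the test bodies that follow: each test launches the testvol image as a long-running plugin serving one of the sockets configured above, with a per-test state directory (shown here as a placeholder variable):

```
$ podman run -d --security-opt label=disable \
    -v /run/docker/plugins:/run/docker/plugins \
    -v "$STATE_DIR:$STATE_DIR" \
    quay.io/libpod/volume-plugin-test-img \
    --sock-name testvol1 --path "$STATE_DIR"
```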
+ pluginName := "testvol1" + plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath}) + plugin.WaitWithDefaultTimeout() + Expect(plugin.ExitCode()).To(Equal(0)) + + volName := "testVolume1" + create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName}) + create.WaitWithDefaultTimeout() + Expect(create.ExitCode()).To(Equal(0)) + + ls1 := podmanTest.Podman([]string{"volume", "ls", "-q"}) + ls1.WaitWithDefaultTimeout() + Expect(ls1.ExitCode()).To(Equal(0)) + arrOutput := ls1.OutputToStringArray() + Expect(len(arrOutput)).To(Equal(1)) + Expect(arrOutput[0]).To(ContainSubstring(volName)) + + remove := podmanTest.Podman([]string{"volume", "rm", volName}) + remove.WaitWithDefaultTimeout() + Expect(remove.ExitCode()).To(Equal(0)) + + ls2 := podmanTest.Podman([]string{"volume", "ls", "-q"}) + ls2.WaitWithDefaultTimeout() + Expect(ls2.ExitCode()).To(Equal(0)) + Expect(len(ls2.OutputToStringArray())).To(Equal(0)) + }) + + It("volume inspect with running plugin succeeds", func() { + podmanTest.AddImageToRWStore(volumeTest) + + pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes") + os.Mkdir(pluginStatePath, 0755) + + // Keep this distinct within tests to avoid multiple tests using the same plugin. + pluginName := "testvol2" + plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath}) + plugin.WaitWithDefaultTimeout() + Expect(plugin.ExitCode()).To(Equal(0)) + + volName := "testVolume1" + create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName}) + create.WaitWithDefaultTimeout() + Expect(create.ExitCode()).To(Equal(0)) + + volInspect := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .Driver }}", volName}) + volInspect.WaitWithDefaultTimeout() + Expect(volInspect.ExitCode()).To(Equal(0)) + Expect(volInspect.OutputToString()).To(ContainSubstring(pluginName)) + }) + + It("volume rm with stopped plugin still removes the volume", func() { + podmanTest.AddImageToRWStore(volumeTest) + + pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes") + os.Mkdir(pluginStatePath, 0755) + + // Keep this distinct within tests to avoid multiple tests using the same plugin.
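+ // Unlike the other tests, name the plugin container so it can be stopped mid-test.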
+ pluginName := "testvol3" + ctrName := "pluginCtr" + plugin := podmanTest.Podman([]string{"run", "--name", ctrName, "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath}) + plugin.WaitWithDefaultTimeout() + Expect(plugin.ExitCode()).To(Equal(0)) + + volName := "testVolume1" + create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName}) + create.WaitWithDefaultTimeout() + Expect(create.ExitCode()).To(Equal(0)) + + ls1 := podmanTest.Podman([]string{"volume", "ls", "-q"}) + ls1.WaitWithDefaultTimeout() + Expect(ls1.ExitCode()).To(Equal(0)) + arrOutput := ls1.OutputToStringArray() + Expect(len(arrOutput)).To(Equal(1)) + Expect(arrOutput[0]).To(ContainSubstring(volName)) + + stop := podmanTest.Podman([]string{"stop", "--timeout", "0", ctrName}) + stop.WaitWithDefaultTimeout() + Expect(stop.ExitCode()).To(Equal(0)) + + // Remove should exit non-zero because missing plugin + remove := podmanTest.Podman([]string{"volume", "rm", volName}) + remove.WaitWithDefaultTimeout() + Expect(remove.ExitCode()).To(Not(Equal(0))) + + // But the volume should still be gone + ls2 := podmanTest.Podman([]string{"volume", "ls", "-q"}) + ls2.WaitWithDefaultTimeout() + Expect(ls2.ExitCode()).To(Equal(0)) + Expect(len(ls2.OutputToStringArray())).To(Equal(0)) + }) + + It("use plugin in containers", func() { + podmanTest.AddImageToRWStore(volumeTest) + + pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes") + os.Mkdir(pluginStatePath, 0755) + + // Keep this distinct within tests to avoid multiple tests using the same plugin. + pluginName := "testvol4" + plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath}) + plugin.WaitWithDefaultTimeout() + Expect(plugin.ExitCode()).To(Equal(0)) + + volName := "testVolume1" + create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName}) + create.WaitWithDefaultTimeout() + Expect(create.ExitCode()).To(Equal(0)) + + ctr1 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "sh", "-c", "touch /test/testfile && echo helloworld > /test/testfile"}) + ctr1.WaitWithDefaultTimeout() + Expect(ctr1.ExitCode()).To(Equal(0)) + + ctr2 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "cat", "/test/testfile"}) + ctr2.WaitWithDefaultTimeout() + Expect(ctr2.ExitCode()).To(Equal(0)) + Expect(ctr2.OutputToString()).To(ContainSubstring("helloworld")) + + // HACK: `volume rm -f` is timing out trying to remove containers using the volume. + // Solution: remove them manually... 
+ // TODO: remove this workaround once the `volume rm -f` timeout is fixed + rmAll := podmanTest.Podman([]string{"rm", "-af"}) + rmAll.WaitWithDefaultTimeout() + Expect(rmAll.ExitCode()).To(Equal(0)) + }) +}) diff --git a/test/python/docker/test_containers.py b/test/python/docker/test_containers.py index 5a9f761a6..01e049ed4 100644 --- a/test/python/docker/test_containers.py +++ b/test/python/docker/test_containers.py @@ -179,11 +179,3 @@ class TestContainers(unittest.TestCase): filters = {"name": "top"} ctnrs = self.client.containers.list(all=True, filters=filters) self.assertEqual(len(ctnrs), 1) - - def test_rename_container(self): - top = self.client.containers.get(TestContainers.topContainerId) - - # rename bogus container - with self.assertRaises(errors.APIError) as error: - top.rename(name="newname") - self.assertEqual(error.exception.response.status_code, 404) diff --git a/test/testvol/main.go b/test/testvol/main.go new file mode 100644 index 000000000..14f253aa7 --- /dev/null +++ b/test/testvol/main.go @@ -0,0 +1,309 @@ +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/go-plugins-helpers/volume" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "testvol", + Short: "testvol - volume plugin for Podman", + Long: `Creates simple directory volumes using the Volume Plugin API for testing volume plugin functionality`, + RunE: func(cmd *cobra.Command, args []string) error { + return startServer(config.sockName) + }, + PersistentPreRunE: before, +} + +// Configuration for the volume plugin +type cliConfig struct { + logLevel string + sockName string + path string +} + +// Default configuration is stored here. Will be overridden by flags. +var config cliConfig = cliConfig{ + logLevel: "error", + sockName: "test-volume-plugin", +} + +func init() { + rootCmd.Flags().StringVar(&config.sockName, "sock-name", config.sockName, "Name of unix socket for plugin") + rootCmd.Flags().StringVar(&config.path, "path", "", "Path to initialize state and mount points") + rootCmd.PersistentFlags().StringVar(&config.logLevel, "log-level", config.logLevel, "Log messages at or above the specified level: debug, info, warn, error, fatal, panic") +} + +func before(cmd *cobra.Command, args []string) error { + if config.logLevel == "" { + config.logLevel = "error" + } + + level, err := logrus.ParseLevel(config.logLevel) + if err != nil { + return err + } + + logrus.SetLevel(level) + + return nil +} + +func main() { + if err := rootCmd.Execute(); err != nil { + logrus.Errorf("Error running volume plugin: %v", err) + os.Exit(1) + } + + os.Exit(0) +} + +// startServer runs the HTTP server and responds to requests. +func startServer(socketPath string) error { + logrus.Debugf("Starting server...") + + if config.path == "" { + path, err := ioutil.TempDir("", "test_volume_plugin") + if err != nil { + return errors.Wrapf(err, "error getting directory for plugin") + } + config.path = path + } else { + pathStat, err := os.Stat(config.path) + if err != nil { + return errors.Wrapf(err, "unable to access requested plugin state directory") + } + if !pathStat.IsDir() { + return errors.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path) + } + } + + handle, err := makeDirDriver(config.path) + if err != nil { + return errors.Wrapf(err, "error making volume driver") + } + logrus.Infof("Using %s for volume path", config.path) + + server := volume.NewHandler(handle) + if err := server.ServeUnix(socketPath, 0);
err != nil { + return errors.Wrapf(err, "error starting server") + } + return nil +} + +// DirDriver is a trivial volume driver implementation. +// The volumes field maps name to volume. +type DirDriver struct { + lock sync.Mutex + volumesPath string + volumes map[string]*dirVol +} + +type dirVol struct { + name string + path string + options map[string]string + mounts map[string]bool + createTime time.Time +} + +// makeDirDriver creates a new DirDriver. +func makeDirDriver(path string) (volume.Driver, error) { + drv := new(DirDriver) + drv.volumesPath = path + drv.volumes = make(map[string]*dirVol) + + return drv, nil +} + +// Capabilities returns the capabilities of the driver. +func (d *DirDriver) Capabilities() *volume.CapabilitiesResponse { + logrus.Infof("Hit Capabilities() endpoint") + + return &volume.CapabilitiesResponse{ + Capabilities: volume.Capability{ + Scope: "local", + }, + } +} + +// Create creates a volume. +func (d *DirDriver) Create(opts *volume.CreateRequest) error { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Create() endpoint") + + if _, exists := d.volumes[opts.Name]; exists { + return errors.Errorf("volume with name %s already exists", opts.Name) + } + + newVol := new(dirVol) + newVol.name = opts.Name + newVol.mounts = make(map[string]bool) + newVol.options = make(map[string]string) + newVol.createTime = time.Now() + for k, v := range opts.Options { + newVol.options[k] = v + } + + volPath := filepath.Join(d.volumesPath, opts.Name) + if err := os.Mkdir(volPath, 0755); err != nil { + return errors.Wrapf(err, "error making volume directory") + } + newVol.path = volPath + + d.volumes[opts.Name] = newVol + + logrus.Debugf("Made volume with name %s and path %s", newVol.name, newVol.path) + + return nil +} + +// List lists all volumes available. +func (d *DirDriver) List() (*volume.ListResponse, error) { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit List() endpoint") + + vols := new(volume.ListResponse) + vols.Volumes = []*volume.Volume{} + + for _, vol := range d.volumes { + newVol := new(volume.Volume) + newVol.Name = vol.name + newVol.Mountpoint = vol.path + newVol.CreatedAt = vol.createTime.String() + vols.Volumes = append(vols.Volumes, newVol) + logrus.Debugf("Adding volume %s to list response", newVol.Name) + } + + return vols, nil +} + +// Get retrieves a single volume. +func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Get() endpoint") + + vol, exists := d.volumes[req.Name] + if !exists { + logrus.Debugf("Did not find volume %s", req.Name) + return nil, errors.Errorf("no volume with name %s found", req.Name) + } + + logrus.Debugf("Found volume %s", req.Name) + + resp := new(volume.GetResponse) + resp.Volume = new(volume.Volume) + resp.Volume.Name = vol.name + resp.Volume.Mountpoint = vol.path + resp.Volume.CreatedAt = vol.createTime.String() + + return resp, nil +} + +// Remove removes a single volume.
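+// It returns an error if the volume does not exist or still has active mounts.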
+func (d *DirDriver) Remove(req *volume.RemoveRequest) error { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Remove() endpoint") + + vol, exists := d.volumes[req.Name] + if !exists { + logrus.Debugf("Did not find volume %s", req.Name) + return errors.Errorf("no volume with name %s found", req.Name) + } + logrus.Debugf("Found volume %s", req.Name) + + if len(vol.mounts) > 0 { + logrus.Debugf("Cannot remove %s, is mounted", req.Name) + return errors.Errorf("volume %s is mounted and cannot be removed", req.Name) + } + + delete(d.volumes, req.Name) + + if err := os.RemoveAll(vol.path); err != nil { + return errors.Wrapf(err, "error removing mountpoint of volume %s", req.Name) + } + + logrus.Debugf("Removed volume %s", req.Name) + + return nil +} + +// Path returns the path a single volume is mounted at. +func (d *DirDriver) Path(req *volume.PathRequest) (*volume.PathResponse, error) { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Path() endpoint") + + // TODO: Should we return error if not mounted? + + vol, exists := d.volumes[req.Name] + if !exists { + logrus.Debugf("Cannot locate volume %s", req.Name) + return nil, errors.Errorf("no volume with name %s found", req.Name) + } + + return &volume.PathResponse{ + Mountpoint: vol.path, + }, nil +} + +// Mount mounts the volume. +func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Mount() endpoint") + + vol, exists := d.volumes[req.Name] + if !exists { + logrus.Debugf("Cannot locate volume %s", req.Name) + return nil, errors.Errorf("no volume with name %s found", req.Name) + } + + vol.mounts[req.ID] = true + + return &volume.MountResponse{ + Mountpoint: vol.path, + }, nil +} + +// Unmount unmounts the volume. +func (d *DirDriver) Unmount(req *volume.UnmountRequest) error { + d.lock.Lock() + defer d.lock.Unlock() + + logrus.Infof("Hit Unmount() endpoint") + + vol, exists := d.volumes[req.Name] + if !exists { + logrus.Debugf("Cannot locate volume %s", req.Name) + return errors.Errorf("no volume with name %s found", req.Name) + } + + mount := vol.mounts[req.ID] + if !mount { + logrus.Debugf("Volume %s is not mounted by %s", req.Name, req.ID) + return errors.Errorf("volume %s is not mounted by %s", req.Name, req.ID) + } + + delete(vol.mounts, req.ID) + + return nil +}
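For reference, a minimal out-of-band smoke test of the plugin above might look like the sketch below. It assumes the testvol plugin is already listening on /run/docker/plugins/testvol0.sock (one of the socket paths declared in the containers.conf hunk earlier); the protocol served by go-plugins-helpers is plain JSON over HTTP on that unix socket, so the same VolumeDriver.Create endpoint Podman calls can be hit directly. The volume name "smoke" is illustrative.

package main

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Route all requests over the plugin's unix socket; the hostname in the
	// URL is arbitrary because DialContext always dials the socket directly.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/run/docker/plugins/testvol0.sock")
			},
		},
	}

	// VolumeDriver.Create is the endpoint exercised by
	// `podman volume create --driver testvol0 smoke`.
	resp, err := client.Post("http://plugin/VolumeDriver.Create", "application/json",
		bytes.NewBufferString(`{"Name": "smoke", "Opts": {}}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create:", resp.Status)
}

Running `podman volume create --driver testvol0 smoke` drives the same endpoint through Podman itself, using the socket mapping from containers.conf.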