-rw-r--r--   .gitignore                                 1
-rw-r--r--   Containerfile-testvol                     10
-rw-r--r--   Makefile                                   8
-rw-r--r--   test/e2e/common_test.go                   10
-rw-r--r--   test/e2e/config.go                         1
-rw-r--r--   test/e2e/config/containers.conf           14
-rw-r--r--   test/e2e/volume_plugin_test.go           184
-rw-r--r--   test/python/docker/test_containers.py      8
-rw-r--r--   test/testvol/main.go                     309
9 files changed, 529 insertions, 16 deletions
diff --git a/.gitignore b/.gitignore
index f87c8974f..6a5ae509c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ release.txt
/test/checkseccomp/checkseccomp
/test/copyimg/copyimg
/test/goecho/goecho
+/test/testvol/testvol
.vscode*
result
# Necessary to prevent hack/tree-status.sh false-positive
diff --git a/Containerfile-testvol b/Containerfile-testvol
new file mode 100644
index 000000000..6ff45064b
--- /dev/null
+++ b/Containerfile-testvol
@@ -0,0 +1,10 @@
+FROM golang:1.15-alpine AS build-img
+COPY ./test/testvol/ /go/src/github.com/containers/podman/cmd/testvol/
+COPY ./vendor /go/src/github.com/containers/podman/vendor/
+WORKDIR /go/src/github.com/containers/podman
+RUN go build -o /testvol ./cmd/testvol
+
+FROM alpine
+COPY --from=build-img /testvol /usr/local/bin
+WORKDIR /
+ENTRYPOINT ["/usr/local/bin/testvol"]
diff --git a/Makefile b/Makefile
index 48c3431cd..cc7d9f0ff 100644
--- a/Makefile
+++ b/Makefile
@@ -180,6 +180,14 @@ gofmt: ## Verify the source code gofmt
test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
$(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o $@ ./test/checkseccomp
+.PHONY: test/testvol/testvol
+test/testvol/testvol: .gopathok $(wildcard test/testvol/*.go)
+ $(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -o $@ ./test/testvol
+
+.PHONY: volume-plugin-test-img
+volume-plugin-test-img:
+ podman build -t quay.io/libpod/volume-plugin-test-img -f Containerfile-testvol .
+
.PHONY: test/goecho/goecho
test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
$(GO) build $(BUILDFLAGS) -ldflags '$(LDFLAGS_PODMAN)' -o $@ ./test/goecho
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 18679dd53..2668b1e7b 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -122,7 +122,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
}
// Pull cirros but don't put it into the cache
- pullImages := []string{cirros, fedoraToolbox}
+ pullImages := []string{cirros, fedoraToolbox, volumeTest}
pullImages = append(pullImages, CACHE_IMAGES...)
for _, image := range pullImages {
podman.createArtifact(image)
@@ -483,13 +483,7 @@ func (p *PodmanTestIntegration) CleanupVolume() {
session := p.Podman([]string{"volume", "rm", "-fa"})
session.Wait(90)
- // Stop remove service on volume cleanup
- p.StopRemoteService()
-
- // Nuke tempdir
- if err := os.RemoveAll(p.TempDir); err != nil {
- fmt.Printf("%q\n", err)
- }
+ p.Cleanup()
}
// InspectContainerToJSON takes the session output of an inspect
diff --git a/test/e2e/config.go b/test/e2e/config.go
index e66cd6846..2552595ad 100644
--- a/test/e2e/config.go
+++ b/test/e2e/config.go
@@ -15,6 +15,7 @@ var (
healthcheck = "quay.io/libpod/alpine_healthcheck:latest"
ImageCacheDir = "/tmp/podman/imagecachedir"
fedoraToolbox = "registry.fedoraproject.org/f32/fedora-toolbox:latest"
+ volumeTest = "quay.io/libpod/volume-plugin-test-img:latest"
// This image has seccomp profiles that blocks all syscalls.
// The intention behind blocking all syscalls is to prevent
diff --git a/test/e2e/config/containers.conf b/test/e2e/config/containers.conf
index 35153ba05..5a5e4b7a5 100644
--- a/test/e2e/config/containers.conf
+++ b/test/e2e/config/containers.conf
@@ -56,3 +56,17 @@ umask = "0002"
[engine]
network_cmd_options=["allow_host_loopback=true"]
+
+# We need to ensure each test runs on a separate plugin instance.
+# For now, define a set of plugin socket paths and have each test claim a distinct one.
+[engine.volume_plugins]
+testvol0 = "/run/docker/plugins/testvol0.sock"
+testvol1 = "/run/docker/plugins/testvol1.sock"
+testvol2 = "/run/docker/plugins/testvol2.sock"
+testvol3 = "/run/docker/plugins/testvol3.sock"
+testvol4 = "/run/docker/plugins/testvol4.sock"
+testvol5 = "/run/docker/plugins/testvol5.sock"
+testvol6 = "/run/docker/plugins/testvol6.sock"
+testvol7 = "/run/docker/plugins/testvol7.sock"
+testvol8 = "/run/docker/plugins/testvol8.sock"
+testvol9 = "/run/docker/plugins/testvol9.sock"
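For reference, each key under [engine.volume_plugins] is a driver name that `podman volume create --driver <name>` can select, and its value is the Unix socket the matching plugin must be listening on. Below is a minimal sketch of reading that table outside of Podman — it assumes the github.com/BurntSushi/toml parser and the repo-relative path shown; Podman itself loads the file through containers/common:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    // containersConf models only the piece of containers.conf used here.
    type containersConf struct {
    	Engine struct {
    		// Driver name -> Unix socket the plugin serves on.
    		VolumePlugins map[string]string `toml:"volume_plugins"`
    	} `toml:"engine"`
    }

    func main() {
    	var cfg containersConf
    	if _, err := toml.DecodeFile("test/e2e/config/containers.conf", &cfg); err != nil {
    		panic(err)
    	}
    	for driver, sock := range cfg.Engine.VolumePlugins {
    		fmt.Printf("driver %q -> socket %s\n", driver, sock)
    	}
    }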
diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go
new file mode 100644
index 000000000..16edab27c
--- /dev/null
+++ b/test/e2e/volume_plugin_test.go
@@ -0,0 +1,184 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ . "github.com/containers/podman/v2/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman volume plugins", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ os.Setenv("CONTAINERS_CONF", "config/containers.conf")
+ SkipIfRemote("Volume plugins only supported as local")
+ SkipIfRootless("Root is required for volume plugin testing")
+ os.MkdirAll("/run/docker/plugins", 0755)
+ })
+
+ AfterEach(func() {
+ podmanTest.CleanupVolume()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+ os.Unsetenv("CONTAINERS_CONF")
+ })
+
+ It("volume create with nonexistent plugin errors", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "--driver", "notexist", "test_volume_name"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
+
+ It("volume create with not-running plugin does not error", func() {
+ session := podmanTest.Podman([]string{"volume", "create", "--driver", "testvol0", "test_volume_name"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
+
+ It("volume create and remove with running plugin succeeds", func() {
+ podmanTest.AddImageToRWStore(volumeTest)
+
+ pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
+ os.Mkdir(pluginStatePath, 0755)
+
+ // Keep this distinct within tests to avoid multiple tests using the same plugin.
+ pluginName := "testvol1"
+ plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin.ExitCode()).To(Equal(0))
+
+ volName := "testVolume1"
+ create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ ls1 := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ ls1.WaitWithDefaultTimeout()
+ Expect(ls1.ExitCode()).To(Equal(0))
+ arrOutput := ls1.OutputToStringArray()
+ Expect(len(arrOutput)).To(Equal(1))
+ Expect(arrOutput[0]).To(ContainSubstring(volName))
+
+ remove := podmanTest.Podman([]string{"volume", "rm", volName})
+ remove.WaitWithDefaultTimeout()
+ Expect(remove.ExitCode()).To(Equal(0))
+
+ ls2 := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ ls2.WaitWithDefaultTimeout()
+ Expect(ls2.ExitCode()).To(Equal(0))
+ Expect(len(ls2.OutputToStringArray())).To(Equal(0))
+ })
+
+ It("volume inspect with running plugin succeeds", func() {
+ podmanTest.AddImageToRWStore(volumeTest)
+
+ pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
+ os.Mkdir(pluginStatePath, 0755)
+
+ // Keep this distinct within tests to avoid multiple tests using the same plugin.
+ pluginName := "testvol2"
+ plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin.ExitCode()).To(Equal(0))
+
+ volName := "testVolume1"
+ create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ volInspect := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .Driver }}", volName})
+ volInspect.WaitWithDefaultTimeout()
+ Expect(volInspect.ExitCode()).To(Equal(0))
+ Expect(volInspect.OutputToString()).To(ContainSubstring(pluginName))
+ })
+
+ It("remove plugin with stopped plugin succeeds", func() {
+ podmanTest.AddImageToRWStore(volumeTest)
+
+ pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
+ os.Mkdir(pluginStatePath, 0755)
+
+ // Keep this distinct within tests to avoid multiple tests using the same plugin.
+ pluginName := "testvol3"
+ ctrName := "pluginCtr"
+ plugin := podmanTest.Podman([]string{"run", "--name", ctrName, "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin.ExitCode()).To(Equal(0))
+
+ volName := "testVolume1"
+ create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ ls1 := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ ls1.WaitWithDefaultTimeout()
+ Expect(ls1.ExitCode()).To(Equal(0))
+ arrOutput := ls1.OutputToStringArray()
+ Expect(len(arrOutput)).To(Equal(1))
+ Expect(arrOutput[0]).To(ContainSubstring(volName))
+
+ stop := podmanTest.Podman([]string{"stop", "--timeout", "0", ctrName})
+ stop.WaitWithDefaultTimeout()
+ Expect(stop.ExitCode()).To(Equal(0))
+
+ // Remove should exit non-zero because the plugin is no longer running
+ remove := podmanTest.Podman([]string{"volume", "rm", volName})
+ remove.WaitWithDefaultTimeout()
+ Expect(remove.ExitCode()).To(Not(Equal(0)))
+
+ // But the volume should still be gone
+ ls2 := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ ls2.WaitWithDefaultTimeout()
+ Expect(ls2.ExitCode()).To(Equal(0))
+ Expect(len(ls2.OutputToStringArray())).To(Equal(0))
+ })
+
+ It("use plugin in containers", func() {
+ podmanTest.AddImageToRWStore(volumeTest)
+
+ pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
+ os.Mkdir(pluginStatePath, 0755)
+
+ // Keep this distinct within tests to avoid multiple tests using the same plugin.
+ pluginName := "testvol4"
+ plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
+ plugin.WaitWithDefaultTimeout()
+ Expect(plugin.ExitCode()).To(Equal(0))
+
+ volName := "testVolume1"
+ create := podmanTest.Podman([]string{"volume", "create", "--driver", pluginName, volName})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ ctr1 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "sh", "-c", "touch /test/testfile && echo helloworld > /test/testfile"})
+ ctr1.WaitWithDefaultTimeout()
+ Expect(ctr1.ExitCode()).To(Equal(0))
+
+ ctr2 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "cat", "/test/testfile"})
+ ctr2.WaitWithDefaultTimeout()
+ Expect(ctr2.ExitCode()).To(Equal(0))
+ Expect(ctr2.OutputToString()).To(ContainSubstring("helloworld"))
+
+ // HACK: `volume rm -f` is timing out trying to remove containers using the volume.
+ // Workaround: remove the containers manually first.
+ // TODO: drop this once the `volume rm -f` timeout is fixed.
+ rmAll := podmanTest.Podman([]string{"rm", "-af"})
+ rmAll.WaitWithDefaultTimeout()
+ Expect(rmAll.ExitCode()).To(Equal(0))
+ })
+})
diff --git a/test/python/docker/test_containers.py b/test/python/docker/test_containers.py
index 5a9f761a6..01e049ed4 100644
--- a/test/python/docker/test_containers.py
+++ b/test/python/docker/test_containers.py
@@ -179,11 +179,3 @@ class TestContainers(unittest.TestCase):
filters = {"name": "top"}
ctnrs = self.client.containers.list(all=True, filters=filters)
self.assertEqual(len(ctnrs), 1)
-
- def test_rename_container(self):
- top = self.client.containers.get(TestContainers.topContainerId)
-
- # rename bogus container
- with self.assertRaises(errors.APIError) as error:
- top.rename(name="newname")
- self.assertEqual(error.exception.response.status_code, 404)
diff --git a/test/testvol/main.go b/test/testvol/main.go
new file mode 100644
index 000000000..14f253aa7
--- /dev/null
+++ b/test/testvol/main.go
@@ -0,0 +1,309 @@
+package main
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/docker/go-plugins-helpers/volume"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var rootCmd = &cobra.Command{
+ Use: "testvol",
+ Short: "testvol - volume plugin for Podman",
+ Long: `Creates simple directory volumes using the Volume Plugin API for testing volume plugin functionality`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return startServer(config.sockName)
+ },
+ PersistentPreRunE: before,
+}
+
+// Configuration for the volume plugin
+type cliConfig struct {
+ logLevel string
+ sockName string
+ path string
+}
+
+// Default configuration is stored here. Will be overwritten by flags.
+var config cliConfig = cliConfig{
+ logLevel: "error",
+ sockName: "test-volume-plugin",
+}
+
+func init() {
+ rootCmd.Flags().StringVar(&config.sockName, "sock-name", config.sockName, "Name of unix socket for plugin")
+ rootCmd.Flags().StringVar(&config.path, "path", "", "Path to initialize state and mount points")
+ rootCmd.PersistentFlags().StringVar(&config.logLevel, "log-level", config.logLevel, "Log messages at and above the specified level: debug, info, warn, error, fatal, panic")
+}
+
+func before(cmd *cobra.Command, args []string) error {
+ if config.logLevel == "" {
+ config.logLevel = "error"
+ }
+
+ level, err := logrus.ParseLevel(config.logLevel)
+ if err != nil {
+ return err
+ }
+
+ logrus.SetLevel(level)
+
+ return nil
+}
+
+func main() {
+ if err := rootCmd.Execute(); err != nil {
+ logrus.Errorf("Error running volume plugin: %v", err)
+ os.Exit(1)
+ }
+
+ os.Exit(0)
+}
+
+// startServer runs the HTTP server and responds to requests
+func startServer(socketPath string) error {
+ logrus.Debugf("Starting server...")
+
+ if config.path == "" {
+ path, err := ioutil.TempDir("", "test_volume_plugin")
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory for plugin")
+ }
+ config.path = path
+ } else {
+ pathStat, err := os.Stat(config.path)
+ if err != nil {
+ return errors.Wrapf(err, "unable to access requested plugin state directory")
+ }
+ if !pathStat.IsDir() {
+ return errors.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path)
+ }
+ }
+
+ handle, err := makeDirDriver(config.path)
+ if err != nil {
+ return errors.Wrapf(err, "error making volume driver")
+ }
+ logrus.Infof("Using %s for volume path", config.path)
+
+ server := volume.NewHandler(handle)
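+ // Note: go-plugins-helpers resolves a bare socket name under /run/docker/plugins/,
+ // so --sock-name testvol0 serves on /run/docker/plugins/testvol0.sock, matching
+ // the paths listed in test/e2e/config/containers.conf.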
+ if err := server.ServeUnix(socketPath, 0); err != nil {
+ return errors.Wrapf(err, "error starting server")
+ }
+ return nil
+}
+
+// DirDriver is a trivial volume driver implementation.
+// The volumes field maps volume name to its state.
+type DirDriver struct {
+ lock sync.Mutex
+ volumesPath string
+ volumes map[string]*dirVol
+}
+
+type dirVol struct {
+ name string
+ path string
+ options map[string]string
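+ // mounts records the IDs (from Mount requests) that currently have this volume mounted.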
+ mounts map[string]bool
+ createTime time.Time
+}
+
+// makeDirDriver creates a new DirDriver rooted at the given path.
+func makeDirDriver(path string) (volume.Driver, error) {
+ drv := new(DirDriver)
+ drv.volumesPath = path
+ drv.volumes = make(map[string]*dirVol)
+
+ return drv, nil
+}
+
+// Capabilities returns the capabilities of the driver.
+func (d *DirDriver) Capabilities() *volume.CapabilitiesResponse {
+ logrus.Infof("Hit Capabilities() endpoint")
+
+ return &volume.CapabilitiesResponse{
+ Capabilities: volume.Capability{
+ Scope: "local",
+ },
+ }
+}
+
+// Create creates a volume.
+func (d *DirDriver) Create(opts *volume.CreateRequest) error {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Create() endpoint")
+
+ if _, exists := d.volumes[opts.Name]; exists {
+ return errors.Errorf("volume with name %s already exists", opts.Name)
+ }
+
+ newVol := new(dirVol)
+ newVol.name = opts.Name
+ newVol.mounts = make(map[string]bool)
+ newVol.options = make(map[string]string)
+ newVol.createTime = time.Now()
+ for k, v := range opts.Options {
+ newVol.options[k] = v
+ }
+
+ volPath := filepath.Join(d.volumesPath, opts.Name)
+ if err := os.Mkdir(volPath, 0755); err != nil {
+ return errors.Wrapf(err, "error making volume directory")
+ }
+ newVol.path = volPath
+
+ d.volumes[opts.Name] = newVol
+
+ logrus.Debugf("Made volume with name %s and path %s", newVol.name, newVol.path)
+
+ return nil
+}
+
+// List lists all volumes available.
+func (d *DirDriver) List() (*volume.ListResponse, error) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit List() endpoint")
+
+ vols := new(volume.ListResponse)
+ vols.Volumes = []*volume.Volume{}
+
+ for _, vol := range d.volumes {
+ newVol := new(volume.Volume)
+ newVol.Name = vol.name
+ newVol.Mountpoint = vol.path
+ newVol.CreatedAt = vol.createTime.String()
+ vols.Volumes = append(vols.Volumes, newVol)
+ logrus.Debugf("Adding volume %s to list response", newVol.Name)
+ }
+
+ return vols, nil
+}
+
+// Get retrieves a single volume.
+func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Get() endpoint")
+
+ vol, exists := d.volumes[req.Name]
+ if !exists {
+ logrus.Debugf("Did not find volume %s", req.Name)
+ return nil, errors.Errorf("no volume with name %s found", req.Name)
+ }
+
+ logrus.Debugf("Found volume %s", req.Name)
+
+ resp := new(volume.GetResponse)
+ resp.Volume = new(volume.Volume)
+ resp.Volume.Name = vol.name
+ resp.Volume.Mountpoint = vol.path
+ resp.Volume.CreatedAt = vol.createTime.String()
+
+ return resp, nil
+}
+
+// Remove removes a single volume.
+func (d *DirDriver) Remove(req *volume.RemoveRequest) error {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Remove() endpoint")
+
+ vol, exists := d.volumes[req.Name]
+ if !exists {
+ logrus.Debugf("Did not find volume %s", req.Name)
+ return errors.Errorf("no volume with name %s found")
+ }
+ logrus.Debugf("Found volume %s", req.Name)
+
+ if len(vol.mounts) > 0 {
+ logrus.Debugf("Cannot remove %s, is mounted", req.Name)
+ return errors.Errorf("volume %s is mounted and cannot be removed")
+ }
+
+ delete(d.volumes, req.Name)
+
+ if err := os.RemoveAll(vol.path); err != nil {
+ return errors.Wrapf(err, "error removing mountpoint of volume %s", req.Name)
+ }
+
+ logrus.Debugf("Removed volume %s", req.Name)
+
+ return nil
+}
+
+// Path returns the path a single volume is mounted at.
+func (d *DirDriver) Path(req *volume.PathRequest) (*volume.PathResponse, error) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Path() endpoint")
+
+ // TODO: Should we return error if not mounted?
+
+ vol, exists := d.volumes[req.Name]
+ if !exists {
+ logrus.Debugf("Cannot locate volume %s", req.Name)
+ return nil, errors.Errorf("no volume with name %s found", req.Name)
+ }
+
+ return &volume.PathResponse{
+ Mountpoint: vol.path,
+ }, nil
+}
+
+// Mount mounts the volume.
+func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Mount() endpoint")
+
+ vol, exists := d.volumes[req.Name]
+ if !exists {
+ logrus.Debugf("Cannot locate volume %s", req.Name)
+ return nil, errors.Errorf("no volume with name %s found", req.Name)
+ }
+
+ vol.mounts[req.ID] = true
+
+ return &volume.MountResponse{
+ Mountpoint: vol.path,
+ }, nil
+}
+
+// Unmount unmounts the volume.
+func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ logrus.Infof("Hit Unmount() endpoint")
+
+ vol, exists := d.volumes[req.Name]
+ if !exists {
+ logrus.Debugf("Cannot locate volume %s", req.Name)
+ return errors.Errorf("no volume with name %s found", req.Name)
+ }
+
+ mount := vol.mounts[req.ID]
+ if !mount {
+ logrus.Debugf("Volume %s is not mounted by %s", req.Name, req.ID)
+ return errors.Errorf("volume %s is not mounted by %s", req.Name, req.ID)
+ }
+
+ delete(vol.mounts, req.ID)
+
+ return nil
+}
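For reference, the handler returned by volume.NewHandler speaks the Docker volume-plugin protocol: JSON bodies POSTed to endpoints such as /VolumeDriver.Create on the plugin's Unix socket. Below is a minimal sketch of exercising the plugin directly — it assumes the plugin was started with --sock-name testvol0 (so the socket is /run/docker/plugins/testvol0.sock) and that the volume name "demo" is unused:

    package main

    import (
    	"bytes"
    	"context"
    	"fmt"
    	"io/ioutil"
    	"net"
    	"net/http"
    )

    func main() {
    	// Route all requests over the plugin's Unix socket; the URL host is ignored.
    	client := &http.Client{
    		Transport: &http.Transport{
    			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
    				return net.Dial("unix", "/run/docker/plugins/testvol0.sock")
    			},
    		},
    	}

    	// volume.NewHandler routes POST /VolumeDriver.Create to DirDriver.Create above.
    	body := bytes.NewBufferString(`{"Name": "demo", "Opts": {}}`)
    	resp, err := client.Post("http://plugin/VolumeDriver.Create", "application/json", body)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	out, _ := ioutil.ReadAll(resp.Body)
    	fmt.Printf("create: %d %s\n", resp.StatusCode, out)
    }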