author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>   2019-02-14 21:45:58 +0100
committer  GitHub <noreply@github.com>                                              2019-02-14 21:45:58 +0100
commit     8ec9eb0a7617b6c2db6ed092ab741552dc7cba02 (patch)
tree       2d1212425257a8d332f4400d0896aa2e465176b0
parent     dd82acd8ba02be51ec5fea65584e1f7b2036d7c8 (diff)
parent     52df1fa7e054d577e8416d1d46db1741ad324d4a (diff)
Merge pull request #2229 from rhatdan/volumes
Fix volume handling in podman
-rwxr-xr-x  API.md                                   6
-rw-r--r--  cmd/podman/cleanup.go                    2
-rw-r--r--  cmd/podman/cliconfig/config.go           5
-rw-r--r--  cmd/podman/containers_prune.go          13
-rw-r--r--  cmd/podman/create.go                     3
-rw-r--r--  cmd/podman/rm.go                         5
-rw-r--r--  cmd/podman/run.go                        2
-rw-r--r--  cmd/podman/start.go                      2
-rw-r--r--  cmd/podman/system_prune.go               2
-rw-r--r--  cmd/podman/varlink/io.podman.varlink     4
-rw-r--r--  commands.md                              1
-rw-r--r--  contrib/perftest/main.go                 2
-rw-r--r--  docs/podman.1.md                         1
-rw-r--r--  libpod/adapter/runtime_remote.go         2
-rw-r--r--  libpod/container.go                      3
-rw-r--r--  libpod/container_internal.go           139
-rw-r--r--  libpod/container_internal_linux.go       7
-rw-r--r--  libpod/options.go                        3
-rw-r--r--  libpod/runtime_ctr.go                   29
-rw-r--r--  libpod/runtime_img.go                    2
-rw-r--r--  libpod/volume_internal.go                4
-rw-r--r--  pkg/spec/createconfig.go                44
-rw-r--r--  pkg/util/utils.go                       10
-rw-r--r--  pkg/varlinkapi/containers.go             6
-rw-r--r--  pkg/varlinkapi/containers_create.go      7
25 files changed, 132 insertions, 172 deletions
diff --git a/API.md b/API.md
index e44862c3f..dd19aaf42 100755
--- a/API.md
+++ b/API.md
@@ -97,7 +97,7 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ReceiveFile(path: string, delete: bool) int](#ReceiveFile)
-[func RemoveContainer(name: string, force: bool) string](#RemoveContainer)
+[func RemoveContainer(name: string, force: bool, removeVolumes: bool) string](#RemoveContainer)
[func RemoveImage(name: string, force: bool) string](#RemoveImage)
@@ -777,9 +777,9 @@ method ReceiveFile(path: [string](https://godoc.org/builtin#string), delete: [bo
### <a name="RemoveContainer"></a>func RemoveContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool), removeVolumes: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
RemoveContainer takes requires the name or ID of container as well a boolean representing whether a running
-container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
+container can be stopped and removed. It also takes a flag on whether or not to remove builtin volumes. Upon successful removal of the container, its ID is returned. If the
container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
#### Example
~~~
diff --git a/cmd/podman/cleanup.go b/cmd/podman/cleanup.go
index 537679d75..e465a30e6 100644
--- a/cmd/podman/cleanup.go
+++ b/cmd/podman/cleanup.go
@@ -58,7 +58,7 @@ func cleanupCmd(c *cliconfig.CleanupValues) error {
for _, ctr := range cleanupContainers {
hadError := false
if c.Remove {
- if err := runtime.RemoveContainer(ctx, ctr, false); err != nil {
+ if err := runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 85ded6da0..f38bcaa62 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -135,6 +135,11 @@ type PruneImagesValues struct {
All bool
}
+type PruneContainersValues struct {
+ PodmanCommand
+ Force bool
+}
+
type ImportValues struct {
PodmanCommand
Change []string
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
index acc138fe0..bae578e1d 100644
--- a/cmd/podman/containers_prune.go
+++ b/cmd/podman/containers_prune.go
@@ -13,13 +13,12 @@ import (
)
var (
- pruneContainersCommand cliconfig.ContainersPrune
+ pruneContainersCommand cliconfig.PruneContainersValues
pruneContainersDescription = `
podman container prune
Removes all exited containers
`
-
_pruneContainersCommand = &cobra.Command{
Use: "prune",
Short: "Remove all stopped containers",
@@ -35,9 +34,11 @@ var (
func init() {
pruneContainersCommand.Command = _pruneContainersCommand
pruneContainersCommand.SetUsageTemplate(UsageTemplate())
+ flags := pruneContainersCommand.Flags()
+ flags.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
}
-func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
+func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force, volumes bool) error {
var deleteFuncs []shared.ParallelWorkerInput
filter := func(c *libpod.Container) bool {
@@ -57,7 +58,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
for _, container := range delContainers {
con := container
f := func() error {
- return runtime.RemoveContainer(ctx, con, force)
+ return runtime.RemoveContainer(ctx, con, force, volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
@@ -70,7 +71,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
return printParallelOutput(deleteErrors, errCount)
}
-func pruneContainersCmd(c *cliconfig.ContainersPrune) error {
+func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
@@ -83,5 +84,5 @@ func pruneContainersCmd(c *cliconfig.ContainersPrune) error {
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
- return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"))
+ return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"), c.Bool("volumes"))
}
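
Editor's note: the prune loop above copies the range variable (con := container) before building each delete closure; without that copy, every queued function would capture the same loop variable and could remove the wrong container. A minimal, self-contained sketch of the pattern, using plain goroutines instead of podman's ParallelWorkerInput machinery (names here are illustrative, not podman APIs):

package main

import (
	"fmt"
	"sync"
)

func main() {
	containers := []string{"ctr-a", "ctr-b", "ctr-c"}

	var wg sync.WaitGroup
	deleteFuncs := make([]func(), 0, len(containers))

	for _, container := range containers {
		con := container // copy the loop variable so each closure sees its own value
		deleteFuncs = append(deleteFuncs, func() {
			fmt.Println("removing", con)
		})
	}

	for _, f := range deleteFuncs {
		wg.Add(1)
		go func(work func()) {
			defer wg.Done()
			work()
		}(f)
	}
	wg.Wait()
}
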
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index 1a7f419c0..392163424 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -646,9 +646,10 @@ func parseCreateOpts(ctx context.Context, c *cliconfig.PodmanCommand, runtime *l
}
var ImageVolumes map[string]struct{}
- if data != nil {
+ if data != nil && c.String("image-volume") != "ignore" {
ImageVolumes = data.Config.Volumes
}
+
var imageVolType = map[string]string{
"bind": "",
"tmpfs": "",
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 1e5e9d254..d170e5357 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -39,8 +39,7 @@ func init() {
flags.BoolVarP(&rmCommand.All, "all", "a", false, "Remove all containers")
flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
- flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container (Not implemented yet)")
-
+ flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container")
}
// saveCmd saves the image to either docker-archive or oci
@@ -79,7 +78,7 @@ func rmCmd(c *cliconfig.RmValues) error {
for _, container := range delContainers {
con := container
f := func() error {
- return runtime.RemoveContainer(ctx, con, c.Force)
+ return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
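
Editor's note: with the "Not implemented yet" caveat dropped, the -v/--volumes flag on podman rm now feeds directly into the removal call. A stripped-down cobra sketch of that wiring, built only from the flag-registration call visible in the hunk; rmVolumes and the printed message are placeholders, not podman code:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var rmVolumes bool // placeholder for rmCommand.Volumes

func main() {
	cmd := &cobra.Command{
		Use:   "rm [flags] CONTAINER [CONTAINER...]",
		Short: "Remove one or more containers",
		RunE: func(cmd *cobra.Command, args []string) error {
			for _, ctr := range args {
				// In podman this becomes runtime.RemoveContainer(ctx, con, force, rmVolumes).
				fmt.Printf("removing %s (remove volumes: %v)\n", ctr, rmVolumes)
			}
			return nil
		},
	}
	cmd.Flags().BoolVarP(&rmVolumes, "volumes", "v", false,
		"Remove the volumes associated with the container")

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
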
diff --git a/cmd/podman/run.go b/cmd/podman/run.go
index 16ec7c3c0..64f8b6856 100644
--- a/cmd/podman/run.go
+++ b/cmd/podman/run.go
@@ -132,7 +132,7 @@ func runCmd(c *cliconfig.RunValues) error {
exitCode = 126
}
if c.IsSet("rm") {
- if deleteError := runtime.RemoveContainer(ctx, ctr, true); deleteError != nil {
+ if deleteError := runtime.RemoveContainer(ctx, ctr, true, false); deleteError != nil {
logrus.Errorf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
}
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index d1434508d..3a606d662 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -144,7 +144,7 @@ func startCmd(c *cliconfig.StartValues) error {
logrus.Errorf("unable to detect if container %s should be deleted", ctr.ID())
}
if createArtifact.Rm {
- if rmErr := runtime.RemoveContainer(ctx, ctr, true); rmErr != nil {
+ if rmErr := runtime.RemoveContainer(ctx, ctr, true, false); rmErr != nil {
logrus.Errorf("unable to remove container %s after it failed to start", ctr.ID())
}
}
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index a88027558..a91d7bf0a 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -76,7 +76,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
ctx := getContext()
fmt.Println("Deleted Containers")
- lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false)
+ lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false, false)
if c.Bool("volumes") {
fmt.Println("Deleted Volumes")
err := volumePrune(runtime, getContext())
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index dc6a25c44..3b7a11fc3 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -600,7 +600,7 @@ method GetAttachSockets(name: string) -> (sockets: Sockets)
# a [ContainerNotFound](#ContainerNotFound) error is returned.
method WaitContainer(name: string) -> (exitcode: int)
-# RemoveContainer takes requires the name or ID of container as well a boolean representing whether a running
+# RemoveContainer requires the name or ID of a container, a boolean indicating whether to remove builtin volumes, and a boolean representing whether a running
# container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
# container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
# #### Example
@@ -610,7 +610,7 @@ method WaitContainer(name: string) -> (exitcode: int)
# "container": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
# }
# ~~~
-method RemoveContainer(name: string, force: bool) -> (container: string)
+method RemoveContainer(name: string, force: bool, removeVolumes: bool) -> (container: string)
# DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted
# container IDs. See also [RemoveContainer](RemoveContainer).
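
Editor's note: the varlink signature now carries removeVolumes end to end, matching the libpod runtime change further down. The sketch below restates the updated contract as a plain Go interface plus a no-op stub; ContainerRemover and noopRemover are illustrative names, not part of libpod or the generated iopodman bindings:

package main

import (
	"context"
	"fmt"
)

// ContainerRemover is a hypothetical interface mirroring the updated contract:
// name or ID, force, and the new removeVolumes flag.
type ContainerRemover interface {
	// RemoveContainer returns the ID of the removed container on success.
	RemoveContainer(ctx context.Context, nameOrID string, force, removeVolumes bool) (string, error)
}

// noopRemover is a stub implementation used only to make the sketch compile.
type noopRemover struct{}

func (noopRemover) RemoveContainer(ctx context.Context, nameOrID string, force, removeVolumes bool) (string, error) {
	return nameOrID, nil
}

func main() {
	var r ContainerRemover = noopRemover{}
	// Remove a stopped container and also drop its builtin (named) volumes.
	id, err := r.RemoveContainer(context.Background(), "my-container", false, true)
	if err != nil {
		fmt.Println("remove failed:", err)
		return
	}
	fmt.Println("removed container", id)
}
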
diff --git a/commands.md b/commands.md
index c7d03d5ad..37fc57f65 100644
--- a/commands.md
+++ b/commands.md
@@ -73,6 +73,7 @@
| [podman-unpause(1)](/docs/podman-unpause.1.md) | Unpause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)|
| [podman-varlink(1)](/docs/podman-varlink.1.md) | Run the varlink backend ||
| [podman-version(1)](/docs/podman-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)|
+| [podman-volume(1)](/docs/podman-volume.1.md) | Manage Volumes ||
| [podman-volume-create(1)](/docs/podman-volume-create.1.md) | Create a volume ||
| [podman-volume-inspect(1)](/docs/podman-volume-inspect.1.md) | Get detailed information on one or more volumes ||
| [podman-volume-ls(1)](/docs/podman-volume-ls.1.md) | List all the available volumes ||
diff --git a/contrib/perftest/main.go b/contrib/perftest/main.go
index 6a6725ab9..c0a91209f 100644
--- a/contrib/perftest/main.go
+++ b/contrib/perftest/main.go
@@ -218,7 +218,7 @@ func runSingleThreadedStressTest(ctx context.Context, client *libpod.Runtime, im
//Delete Container
deleteStartTime := time.Now()
- err = client.RemoveContainer(ctx, ctr, true)
+ err = client.RemoveContainer(ctx, ctr, true, false)
if err != nil {
return nil, err
}
diff --git a/docs/podman.1.md b/docs/podman.1.md
index 51ef00383..760f27310 100644
--- a/docs/podman.1.md
+++ b/docs/podman.1.md
@@ -168,6 +168,7 @@ the exit codes follow the `chroot` standard, see below:
| [podman-umount(1)](podman-umount.1.md) | Unmount a working container's root filesystem. |
| [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. |
| [podman-version(1)](podman-version.1.md) | Display the Podman version information. |
+| [podman-volume(1)](podman-volume.1.md) | Manage Volumes. |
| [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. |
## FILES
diff --git a/libpod/adapter/runtime_remote.go b/libpod/adapter/runtime_remote.go
index 66ff5ed51..a96676ee2 100644
--- a/libpod/adapter/runtime_remote.go
+++ b/libpod/adapter/runtime_remote.go
@@ -544,7 +544,7 @@ func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libp
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
// Otherwise, RemoveContainer will return an error if the container is running
-func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force bool) error {
+func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error {
return libpod.ErrNotImplemented
}
diff --git a/libpod/container.go b/libpod/container.go
index fec61533d..75f4a4a4f 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -358,8 +358,7 @@ type ContainerConfig struct {
ExitCommand []string `json:"exitCommand,omitempty"`
// LocalVolumes are the built-in volumes we get from the --volumes-from flag
// It picks up the built-in volumes of the container used by --volumes-from
- LocalVolumes []string
-
+ LocalVolumes []spec.Mount
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f82cbd674..b2ebad777 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -10,21 +10,16 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
- "github.com/containers/buildah/imagebuildah"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/mount"
- "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -1053,113 +1048,6 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
- var uid, gid int
- mountPoint := c.state.Mountpoint
- if !c.state.Mounted {
- return errors.Wrapf(ErrInternal, "container is not mounted")
- }
- newImage, err := c.runtime.imageRuntime.NewFromLocal(c.config.RootfsImageID)
- if err != nil {
- return err
- }
- imageData, err := newImage.Inspect(ctx)
- if err != nil {
- return err
- }
- // Add the built-in volumes of the container passed in to --volumes-from
- for _, vol := range c.config.LocalVolumes {
- if imageData.Config.Volumes == nil {
- imageData.Config.Volumes = map[string]struct{}{
- vol: {},
- }
- } else {
- imageData.Config.Volumes[vol] = struct{}{}
- }
- }
-
- if c.config.User != "" {
- if execUser == nil {
- return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
- }
- uid = execUser.Uid
- gid = execUser.Gid
- }
-
- for k := range imageData.Config.Volumes {
- mount := spec.Mount{
- Destination: k,
- Type: "bind",
- Options: []string{"private", "bind", "rw"},
- }
- if MountExists(g.Mounts(), k) {
- continue
- }
- volumePath := filepath.Join(c.config.StaticDir, "volumes", k)
-
- // Ensure the symlinks are resolved
- resolvedSymlink, err := imagebuildah.ResolveSymLink(mountPoint, k)
- if err != nil {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot resolve %s in %s for container %s", k, mountPoint, c.ID())
- }
- var srcPath string
- if resolvedSymlink != "" {
- srcPath = filepath.Join(mountPoint, resolvedSymlink)
- } else {
- srcPath = filepath.Join(mountPoint, k)
- }
-
- if _, err := os.Stat(srcPath); os.IsNotExist(err) {
- logrus.Infof("Volume image mount point %s does not exist in root FS, need to create it", k)
- if err = os.MkdirAll(srcPath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(srcPath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", srcPath, k, c.ID())
- }
- }
-
- if _, err := os.Stat(volumePath); os.IsNotExist(err) {
- if err = os.MkdirAll(volumePath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(volumePath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = label.Relabel(volumePath, c.config.MountLabel, false); err != nil {
- return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
- if err = chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, k, c.ID(), srcPath)
- }
-
- // Set the volume path with the same owner and permission of source path
- sstat, _ := os.Stat(srcPath)
- st, ok := sstat.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
- uid := int(st.Uid)
- gid := int(st.Gid)
-
- if err := os.Lchown(volumePath, uid, gid); err != nil {
- return err
- }
- if os.Chmod(volumePath, sstat.Mode()); err != nil {
- return err
- }
-
- }
-
- mount.Source = volumePath
- g.AddMount(mount)
- }
- return nil
-}
-
// Save OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
@@ -1303,3 +1191,30 @@ func getExcludedCGroups() (excludes []string) {
excludes = []string{"rdma"}
return
}
+
+// namedVolumes returns named volumes for the container
+func (c *Container) namedVolumes() ([]string, error) {
+ var volumes []string
+ for _, vol := range c.config.Spec.Mounts {
+ if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
+ volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
+ split := strings.Split(volume, "/")
+ volume = split[0]
+ if _, err := c.runtime.state.Volume(volume); err == nil {
+ volumes = append(volumes, volume)
+ }
+ }
+ }
+ return volumes, nil
+}
+
+// this should be from chrootarchive.
+func (c *Container) copyWithTarFromImage(src, dest string) error {
+ mountpoint, err := c.mount()
+ if err != nil {
+ return err
+ }
+ a := archive.NewDefaultArchiver()
+ source := filepath.Join(mountpoint, src)
+ return a.CopyWithTar(source, dest)
+}
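
Editor's note: the new namedVolumes helper identifies a named volume by checking whether a mount source lives under the runtime's volume directory, trimming that prefix, and keeping the first path component. A stdlib-only sketch of that extraction, with a hard-coded volumePath standing in for c.runtime.config.VolumePath:

package main

import (
	"fmt"
	"strings"
)

// volumeNameFromSource mimics the prefix-trim logic in namedVolumes:
// a source such as /var/lib/containers/storage/volumes/mydata/_data
// maps to the volume name "mydata".
func volumeNameFromSource(source, volumePath string) (string, bool) {
	if !strings.HasPrefix(source, volumePath+"/") {
		return "", false // not a named volume managed under volumePath
	}
	rest := strings.TrimPrefix(source, volumePath+"/")
	return strings.Split(rest, "/")[0], true
}

func main() {
	volumePath := "/var/lib/containers/storage/volumes" // illustrative default
	sources := []string{
		"/var/lib/containers/storage/volumes/mydata/_data",
		"/host/config", // plain bind mount, not a named volume
	}
	for _, src := range sources {
		if name, ok := volumeNameFromSource(src, volumePath); ok {
			fmt.Printf("%s -> named volume %q\n", src, name)
		} else {
			fmt.Printf("%s -> not a named volume\n", src)
		}
	}
}
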
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index bcdfdaee3..65cb47c8c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -235,13 +235,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- // Bind builtin image volumes
- if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
- return nil, errors.Wrapf(err, "error mounting image volumes")
- }
- }
-
if c.config.User != "" {
// User and Group must go together
g.SetProcessUID(uint32(execUser.Uid))
diff --git a/libpod/options.go b/libpod/options.go
index d965c058e..06737776b 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -1058,7 +1059,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
// from a container passed in to the --volumes-from flag.
// This stores the built-in volume information in the Config so we can
// add them when creating the container.
-func WithLocalVolumes(volumes []string) CtrCreateOption {
+func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 4f8192198..185090cf7 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -177,9 +177,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
if err != nil {
newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
if err != nil {
- logrus.Errorf("error creating named volume %q: %v", vol.Source, err)
+ return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
}
ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+ if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Source)
+ }
continue
}
ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
@@ -225,17 +228,19 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
+// If removeVolume is specified, named volumes used by the container will
+// be removed also if and only if the container is the sole user
// Otherwise, RemoveContainer will return an error if the container is running
-func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
r.lock.Lock()
defer r.lock.Unlock()
- return r.removeContainer(ctx, c, force)
+ return r.removeContainer(ctx, c, force, removeVolume)
}
// Internal function to remove a container
// Locks the container, but does not lock the runtime
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
if !c.valid {
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
@@ -248,6 +253,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
// To avoid races around removing a container and the pod it is in
var pod *Pod
var err error
+ runtime := c.runtime
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
@@ -333,6 +339,13 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}
+ var volumes []string
+ if removeVolume {
+ volumes, err = c.namedVolumes()
+ if err != nil {
+ logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
+ }
+ }
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {
@@ -397,6 +410,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
+ for _, v := range volumes {
+ if volume, err := runtime.state.Volume(v); err == nil {
+ if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ logrus.Errorf("cleanup volume (%s): %v", v, err)
+ }
+ }
+ }
+
return cleanupErr
}
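
Editor's note: the removal path above records the container's named volumes up front, finishes removing the container itself, and only then tries to delete each volume, treating "volume in use" and "no such volume" as non-fatal. A toy sketch of that ordering, with an in-memory reference count standing in for the libpod state (all types here are illustrative, not libpod APIs):

package main

import (
	"errors"
	"fmt"
)

var errVolumeInUse = errors.New("volume is being used")

// fakeState is a stand-in for the runtime state: volume name -> number of other users.
type fakeState struct {
	volumeUsers map[string]int
}

func (s *fakeState) removeVolume(name string) error {
	users, ok := s.volumeUsers[name]
	if !ok {
		return nil // already gone; nothing to do
	}
	if users > 0 {
		return errVolumeInUse // another container still uses it
	}
	delete(s.volumeUsers, name)
	return nil
}

func main() {
	state := &fakeState{volumeUsers: map[string]int{"mydata": 0, "shared": 1}}

	// 1. Collected before the container is torn down (namedVolumes in the diff).
	volumes := []string{"mydata", "shared"}

	// 2. ... the container itself is removed from the state here ...

	// 3. Volume cleanup is best-effort: "in use" is only logged, never returned.
	for _, v := range volumes {
		if err := state.removeVolume(v); err != nil {
			fmt.Printf("cleanup volume (%s): %v\n", v, err)
			continue
		}
		fmt.Printf("removed volume %s\n", v)
	}
}
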
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index c20aa77a3..1e9689362 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -43,7 +43,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
if len(imageCtrs) > 0 && len(img.Names()) <= 1 {
if force {
for _, ctr := range imageCtrs {
- if err := r.removeContainer(ctx, ctr, true); err != nil {
+ if err := r.removeContainer(ctx, ctr, true, false); err != nil {
return "", errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", img.ID(), ctr.ID())
}
}
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 800e6d106..0de8a2350 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -5,10 +5,6 @@ import (
"path/filepath"
)
-// VolumePath is the path under which all volumes that are created using the
-// local driver will be created
-// const VolumePath = "/var/lib/containers/storage/volumes"
-
// Creates a new volume
func newVolume(runtime *Runtime) (*Volume, error) {
volume := new(Volume)
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 8edab831f..8da44a2f0 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -11,7 +11,9 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-connections/nat"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -133,8 +135,8 @@ type CreateConfig struct {
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
- LocalVolumes []string //Keeps track of the built-in volumes of container used in the --volumes-from flag
- Syslog bool // Whether to enable syslog on exit commands
+ LocalVolumes []spec.Mount //Keeps track of the built-in volumes of container used in the --volumes-from flag
+ Syslog bool // Whether to enable syslog on exit commands
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -215,7 +217,7 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
//GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
- var m []spec.Mount
+ m := c.LocalVolumes
for _, i := range c.Volumes {
var options []string
spliti := strings.Split(i, ":")
@@ -233,22 +235,31 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
logrus.Debugf("User mount %s:%s options %v", spliti[0], spliti[1], options)
}
- // volumes from image config
- if c.ImageVolumeType != "tmpfs" {
+ if c.ImageVolumeType == "ignore" {
return m, nil
}
+
for vol := range c.BuiltinImgVolumes {
if libpod.MountExists(specMounts, vol) {
continue
}
+
mount := spec.Mount{
Destination: vol,
- Type: string(TypeTmpfs),
- Source: string(TypeTmpfs),
- Options: []string{"rprivate", "rw", "noexec", "nosuid", "nodev", "tmpcopyup"},
+ Type: c.ImageVolumeType,
+ Options: []string{"rprivate", "rw", "nodev"},
+ }
+ if c.ImageVolumeType == "tmpfs" {
+ mount.Source = "tmpfs"
+ mount.Options = append(mount.Options, "tmpcopyup")
+ } else {
+ // This will cause a new local Volume to be created on your system
+ mount.Source = stringid.GenerateNonCryptoID()
+ mount.Options = append(mount.Options, "bind")
}
m = append(m, mount)
}
+
return m, nil
}
@@ -256,6 +267,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
// and adds it to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
var options string
+
+ if rootless.SkipStorageSetup() {
+ return nil
+ }
+
for _, vol := range c.VolumesFrom {
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
@@ -265,6 +281,10 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
+ inspect, err := ctr.Inspect(false)
+ if err != nil {
+ return errors.Wrapf(err, "error inspecting %q", splitVol[0])
+ }
var createArtifact CreateConfig
artifact, err := ctr.GetArtifact("create-config")
if err != nil {
@@ -273,9 +293,13 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err := json.Unmarshal(artifact, &createArtifact); err != nil {
return err
}
-
for key := range createArtifact.BuiltinImgVolumes {
- c.LocalVolumes = append(c.LocalVolumes, key)
+ for _, m := range inspect.Mounts {
+ if m.Destination == key {
+ c.LocalVolumes = append(c.LocalVolumes, m)
+ break
+ }
+ }
}
for _, i := range createArtifact.Volumes {
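
Editor's note: the GetVolumeMounts hunk decides, per image-declared volume, between a tmpfs mount and a bind mount whose source is a freshly generated name that later becomes a local named volume. A self-contained sketch of that branch; the Mount struct mirrors the OCI runtime-spec shape, and randomVolumeName is a stand-in for stringid.GenerateNonCryptoID:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// Mount mirrors the fields of the OCI runtime-spec Mount used in the diff.
type Mount struct {
	Destination string
	Type        string
	Source      string
	Options     []string
}

// randomVolumeName stands in for stringid.GenerateNonCryptoID.
func randomVolumeName() string {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// imageVolumeMount builds the mount for an image-declared volume, following the
// tmpfs / named-volume split introduced in the diff ("ignore" returns earlier).
func imageVolumeMount(dest, imageVolumeType string) Mount {
	m := Mount{
		Destination: dest,
		Type:        imageVolumeType,
		Options:     []string{"rprivate", "rw", "nodev"},
	}
	if imageVolumeType == "tmpfs" {
		m.Source = "tmpfs"
		m.Options = append(m.Options, "tmpcopyup")
	} else {
		// A bind mount whose generated source name triggers creation of a new local volume.
		m.Source = randomVolumeName()
		m.Options = append(m.Options, "bind")
	}
	return m
}

func main() {
	fmt.Printf("%+v\n", imageVolumeMount("/var/lib/data", "tmpfs"))
	fmt.Printf("%+v\n", imageVolumeMount("/var/lib/data", "bind"))
}
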
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 52f431881..db8a3d5bb 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -259,8 +259,8 @@ func GetRootlessStorageOpts() (storage.StoreOptions, error) {
return opts, nil
}
-// GetRootlessVolumeInfo returns where all the name volumes will be created in rootless mode
-func GetRootlessVolumeInfo() (string, error) {
+// GetRootlessVolumePath returns where all the named volumes will be created in rootless mode
+func GetRootlessVolumePath() (string, error) {
dataDir, _, err := GetRootlessDirInfo()
if err != nil {
return "", err
@@ -307,15 +307,13 @@ func GetDefaultStoreOptions() (storage.StoreOptions, string, error) {
err error
)
storageOpts := storage.DefaultStoreOptions
- volumePath := "/var/lib/containers/storage"
-
+ volumePath := filepath.Join(storageOpts.GraphRoot, "volumes")
if rootless.IsRootless() {
storageOpts, err = GetRootlessStorageOpts()
if err != nil {
return storageOpts, volumePath, err
}
-
- volumePath, err = GetRootlessVolumeInfo()
+ volumePath, err = GetRootlessVolumePath()
if err != nil {
return storageOpts, volumePath, err
}
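
Editor's note: after this change the default volume directory is derived from the storage graph root rather than being hard-coded. A small sketch of the derivation; the rootless branch assumes the conventional $XDG_DATA_HOME/containers/storage (falling back to ~/.local/share/containers/storage) layout rather than calling podman's own helpers:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultVolumePath mirrors the new rule: volumes live under <graph root>/volumes.
func defaultVolumePath(graphRoot string) string {
	return filepath.Join(graphRoot, "volumes")
}

// rootlessGraphRoot approximates the rootless storage location used by podman.
// This is an assumption for illustration, not a call into pkg/util.
func rootlessGraphRoot() string {
	dataDir := os.Getenv("XDG_DATA_HOME")
	if dataDir == "" {
		home, _ := os.UserHomeDir()
		dataDir = filepath.Join(home, ".local", "share")
	}
	return filepath.Join(dataDir, "containers", "storage")
}

func main() {
	fmt.Println("root:    ", defaultVolumePath("/var/lib/containers/storage"))
	fmt.Println("rootless:", defaultVolumePath(rootlessGraphRoot()))
}
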
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 2b2832838..8a52efa61 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -358,13 +358,13 @@ func (i *LibpodAPI) WaitContainer(call iopodman.VarlinkCall, name string) error
}
// RemoveContainer ...
-func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool) error {
+func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool, removeVolumes bool) error {
ctx := getContext()
ctr, err := i.Runtime.LookupContainer(name)
if err != nil {
return call.ReplyContainerNotFound(name)
}
- if err := i.Runtime.RemoveContainer(ctx, ctr, force); err != nil {
+ if err := i.Runtime.RemoveContainer(ctx, ctr, force, removeVolumes); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
return call.ReplyRemoveContainer(ctr.ID())
@@ -385,7 +385,7 @@ func (i *LibpodAPI) DeleteStoppedContainers(call iopodman.VarlinkCall) error {
return call.ReplyErrorOccurred(err.Error())
}
if state != libpod.ContainerStateRunning {
- if err := i.Runtime.RemoveContainer(ctx, ctr, false); err != nil {
+ if err := i.Runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
deletedContainers = append(deletedContainers, ctr.ID())
diff --git a/pkg/varlinkapi/containers_create.go b/pkg/varlinkapi/containers_create.go
index f1835a189..6b53b22c6 100644
--- a/pkg/varlinkapi/containers_create.go
+++ b/pkg/varlinkapi/containers_create.go
@@ -131,9 +131,14 @@ func varlinkCreateToCreateConfig(ctx context.Context, create iopodman.Create, ru
}
imageID := data.ID
+ var ImageVolumes map[string]struct{}
+ if data != nil && create.Image_volume_type != "ignore" {
+ ImageVolumes = data.Config.Volumes
+ }
+
config := &cc.CreateConfig{
Runtime: runtime,
- BuiltinImgVolumes: data.Config.Volumes,
+ BuiltinImgVolumes: ImageVolumes,
ConmonPidFile: create.Conmon_pidfile,
ImageVolumeType: create.Image_volume_type,
CapAdd: create.Cap_add,