Diffstat
-rw-r--r--  .cirrus.yml                            7
-rwxr-xr-x  API.md                                18
-rw-r--r--  Dockerfile                            17
-rw-r--r--  Makefile                               9
-rw-r--r--  cmd/podman/generate_kube.go            4
-rw-r--r--  cmd/podman/play_kube.go               21
-rw-r--r--  cmd/podman/tree.go                    77
-rw-r--r--  cmd/podman/varlink/io.podman.varlink  10
-rw-r--r--  docs/podman-volume-rm.1.md             3
-rw-r--r--  docs/tutorials/podman_tutorial.md     12
-rw-r--r--  libpod/boltdb_state.go                79
-rw-r--r--  libpod/boltdb_state_internal.go       53
-rw-r--r--  libpod/container.go                   33
-rw-r--r--  libpod/container_internal.go          16
-rw-r--r--  libpod/container_internal_linux.go    18
-rw-r--r--  libpod/image/image.go                 28
-rw-r--r--  libpod/in_memory_state.go             32
-rw-r--r--  libpod/options.go                     92
-rw-r--r--  libpod/runtime_ctr.go                 67
-rw-r--r--  libpod/runtime_volume_linux.go        24
-rw-r--r--  libpod/state.go                        4
-rw-r--r--  pkg/adapter/images.go                 34
-rw-r--r--  pkg/adapter/images_remote.go          32
-rw-r--r--  pkg/adapter/runtime_remote.go          7
-rw-r--r--  pkg/registrar/registrar_test.go      318
-rw-r--r--  pkg/spec/createconfig.go             127
-rw-r--r--  pkg/spec/spec.go                      40
-rw-r--r--  pkg/varlinkapi/images.go              38
-rw-r--r--  test/e2e/commit_test.go                2
-rw-r--r--  test/e2e/generate_kube_test.go         4
-rw-r--r--  test/e2e/healthcheck_run_test.go       3
-rw-r--r--  test/e2e/run_test.go                   1
-rw-r--r--  test/e2e/volume_rm_test.go             4
-rw-r--r--  test/framework/framework.go           56
34 files changed, 802 insertions(+), 488 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 3516c7d61..325176179 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -121,6 +121,7 @@ gating_task:
gate_script:
# N/B: entrypoint.sh resets $GOSRC (same as make clean)
+ - '/usr/local/bin/entrypoint.sh install.tools'
- '/usr/local/bin/entrypoint.sh validate'
- '/usr/local/bin/entrypoint.sh lint'
- '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/test/test_dot_cirrus_yaml.py'
@@ -179,9 +180,9 @@ build_each_commit_task:
gce_instance:
image_project: "libpod-218412"
zone: "us-central1-a" # Required by Cirrus for the time being
- cpu: 2
- memory: "4Gb"
- disk: 40
+ cpu: 8
+ memory: "8Gb"
+ disk: 200
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
timeout_in: 30m
diff --git a/API.md b/API.md
index f1c1b89cd..0accf5009 100755
--- a/API.md
+++ b/API.md
@@ -5,6 +5,8 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func BuildImage(build: BuildInfo) MoreResponse](#BuildImage)
+[func BuildImageHierarchyMap(name: string) string](#BuildImageHierarchyMap)
+
[func Commit(name: string, image_name: string, changes: []string, author: string, message: string, pause: bool, manifestType: string) string](#Commit)
[func ContainerArtifacts(name: string, artifactName: string) string](#ContainerArtifacts)
@@ -57,6 +59,8 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func GetInfo() PodmanInfo](#GetInfo)
+[func GetLayersMapWithImageInfo() string](#GetLayersMapWithImageInfo)
+
[func GetPod(name: string) ListPodData](#GetPod)
[func GetPodStats(name: string) string, ContainerStats](#GetPodStats)
@@ -259,6 +263,11 @@ method BuildImage(build: [BuildInfo](#BuildInfo)) [MoreResponse](#MoreResponse)<
BuildImage takes a [BuildInfo](#BuildInfo) structure and builds an image. At a minimum, you must provide the
'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [MoreResponse](#MoreResponse) structure
that contains the build logs and resulting image ID.
+### <a name="BuildImageHierarchyMap"></a>func BuildImageHierarchyMap
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method BuildImageHierarchyMap(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
+BuildImageHierarchyMap is for the development of Podman and should not be used.
### <a name="Commit"></a>func Commit
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -396,7 +405,7 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
method Diff(name: [string](https://godoc.org/builtin#string)) [DiffInfo](#DiffInfo)</div>
-
+Diff returns a diff between libpod objects
### <a name="ExportContainer"></a>func ExportContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -520,6 +529,11 @@ If the image cannot be found, [ImageNotFound](#ImageNotFound) will b
method GetInfo() [PodmanInfo](#PodmanInfo)</div>
GetInfo returns a [PodmanInfo](#PodmanInfo) struct that describes podman and its host such as storage stats,
build information of Podman, and system-wide registries.
+### <a name="GetLayersMapWithImageInfo"></a>func GetLayersMapWithImageInfo
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method GetLayersMapWithImageInfo() [string](https://godoc.org/builtin#string)</div>
+GetLayersMapWithImageInfo is for the development of Podman and should not be used.
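+For development and debugging, the call mirrors the DeleteUnusedImages example above; a hypothetical invocation (the returned layerMap is a JSON-encoded string, output shape assumed):
+```
+$ varlink call -m unix:/run/podman/io.podman/io.podman.GetLayersMapWithImageInfo
+{
+  "layerMap": "{\"<layer-id>\": {\"ID\": \"<layer-id>\", \"ParentID\": \"\", ...}}"
+}
+```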
### <a name="GetPod"></a>func GetPod
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -1507,6 +1521,8 @@ containers [int](https://godoc.org/builtin#int)
labels [map[string]](#map[string])
isParent [bool](https://godoc.org/builtin#bool)
+
+topLayer [string](https://godoc.org/builtin#string)
### <a name="ImageHistory"></a>type ImageHistory
ImageHistory describes the returned structure from ImageHistory.
diff --git a/Dockerfile b/Dockerfile
index 28e0b88cb..83cd3fccd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,4 @@
-FROM golang:1.11
-
-RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
+FROM golang:1.12
RUN apt-get update && apt-get install -y \
apparmor \
@@ -23,6 +21,8 @@ RUN apt-get update && apt-get install -y \
libostree-dev \
libprotobuf-dev \
libprotobuf-c0-dev \
+ libseccomp2 \
+ libseccomp-dev \
libtool \
libudev-dev \
protobuf-c-compiler \
@@ -43,17 +43,6 @@ RUN apt-get update && apt-get install -y \
--no-install-recommends \
&& apt-get clean
-ENV LIBSECCOMP_COMMIT release-2.3
-RUN set -x \
- && git clone https://github.com/seccomp/libseccomp "$GOPATH/src/github.com/seccomp/libseccomp" \
- && cd "$GOPATH/src/github.com/seccomp/libseccomp" \
- && git fetch origin --tags \
- && git checkout -q "$LIBSECCOMP_COMMIT" \
- && ./autogen.sh \
- && ./configure --prefix=/usr \
- && make all \
- && make install
-
# Install runc
ENV RUNC_COMMIT 96ec2177ae841256168fcf76954f7177af9446eb
RUN set -x \
diff --git a/Makefile b/Makefile
index 1f4a7f6e7..ebd0ddf2d 100644
--- a/Makefile
+++ b/Makefile
@@ -30,7 +30,6 @@ BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
ZSHINSTALLDIR=${PREFIX}/share/zsh/site-functions
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
-PACKAGES ?= $(shell $(GO) list -tags "${BUILDTAGS}" ./... | grep -v github.com/containers/libpod/vendor | grep -v e2e | grep -v system )
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
@@ -172,7 +171,13 @@ testunit: libpodimage ## Run unittest on the built image
${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e OCI_RUNTIME -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make localunit
localunit: test/goecho/goecho varlink_generate
- $(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES)
+ ginkgo \
+ -r \
+ --skipPackage test/e2e,pkg/apparmor \
+ --cover \
+ --covermode atomic \
+ --tags "$(BUILDTAGS)" \
+ --succinct
$(MAKE) -C contrib/cirrus/packer test
ginkgo:
diff --git a/cmd/podman/generate_kube.go b/cmd/podman/generate_kube.go
index 42cfba8d8..c58372899 100644
--- a/cmd/podman/generate_kube.go
+++ b/cmd/podman/generate_kube.go
@@ -5,7 +5,6 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/rootless"
podmanVersion "github.com/containers/libpod/version"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
@@ -53,9 +52,6 @@ func generateKubeYAMLCmd(c *cliconfig.GenerateKubeValues) error {
servicePorts []v1.ServicePort
)
- if rootless.IsRootless() {
- return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
- }
args := c.InputArgs
if len(args) != 1 {
return errors.Errorf("you must provide exactly one container|pod ID or name")
diff --git a/cmd/podman/play_kube.go b/cmd/podman/play_kube.go
index b468a7a89..cbe961279 100644
--- a/cmd/podman/play_kube.go
+++ b/cmd/podman/play_kube.go
@@ -15,7 +15,6 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
ns "github.com/containers/libpod/pkg/namespaces"
- "github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/spec"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -73,9 +72,6 @@ func playKubeYAMLCmd(c *cliconfig.KubePlayValues) error {
)
ctx := getContext()
- if rootless.IsRootless() {
- return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
- }
args := c.InputArgs
if len(args) > 1 {
return errors.New("you can only play one kubernetes file at a time")
@@ -243,6 +239,9 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
envs map[string]string
)
+ // The default for MemorySwappiness is -1, not 0
+ containerConfig.Resources.MemorySwappiness = -1
+
containerConfig.Runtime = runtime
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
@@ -270,7 +269,19 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
}
}
- containerConfig.Command = containerYAML.Command
+ containerConfig.Command = []string{}
+ if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
+ }
+ if len(containerConfig.Command) != 0 {
+ containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
+ } else if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
+ }
+ if imageData != nil && len(containerConfig.Command) == 0 {
+ return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
+ }
+
containerConfig.StopSignal = 15
// If the user does not pass in ID mappings, just set to basics
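The command resolution above follows a fixed precedence: the image ENTRYPOINT always comes first, the YAML command is appended only when an ENTRYPOINT exists, and the image CMD is used as a fallback only when there is no ENTRYPOINT. A minimal standalone sketch of that logic (function name hypothetical, not part of this PR; simplified to error on any empty result):

```go
package main

import "fmt"

// resolveCommand reproduces the precedence implemented in
// kubeContainerToCreateConfig above: ENTRYPOINT first, then the YAML
// command appended; CMD only when no ENTRYPOINT exists.
func resolveCommand(entrypoint, cmd, yamlCommand []string) ([]string, error) {
	command := append([]string{}, entrypoint...)
	if len(command) != 0 {
		command = append(command, yamlCommand...)
	} else {
		command = append(command, cmd...)
	}
	if len(command) == 0 {
		return nil, fmt.Errorf("no command specified in container YAML or as CMD or ENTRYPOINT in the image")
	}
	return command, nil
}

func main() {
	c, _ := resolveCommand([]string{"/entrypoint.sh"}, []string{"httpd"}, []string{"-debug"})
	fmt.Println(c) // [/entrypoint.sh -debug]
}
```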
diff --git a/cmd/podman/tree.go b/cmd/podman/tree.go
index c56e35aef..371e88495 100644
--- a/cmd/podman/tree.go
+++ b/cmd/podman/tree.go
@@ -5,9 +5,9 @@ import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod/image"
- units "github.com/docker/go-units"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -41,16 +41,6 @@ func init() {
treeCommand.Flags().BoolVar(&treeCommand.WhatRequires, "whatrequires", false, "Show all child images and layers of the specified image")
}
-// infoImage keep information of Image along with all associated layers
-type infoImage struct {
- // id of image
- id string
- // tags of image
- tags []string
- // layers stores all layers of image.
- layers []image.LayerInfo
-}
-
func treeCmd(c *cliconfig.TreeValues) error {
args := c.InputArgs
if len(args) == 0 {
@@ -60,46 +50,33 @@ func treeCmd(c *cliconfig.TreeValues) error {
return errors.Errorf("you must provide at most 1 argument")
}
- runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
-
- img, err := runtime.ImageRuntime().NewFromLocal(args[0])
+ imageInfo, layerInfoMap, img, err := runtime.Tree(c)
if err != nil {
return err
}
+ return printTree(imageInfo, layerInfoMap, img, c.WhatRequires)
+}
- // Fetch map of image-layers, which is used for printing output.
- layerInfoMap, err := image.GetLayersMapWithImageInfo(runtime.ImageRuntime())
- if err != nil {
- return errors.Wrapf(err, "error while retriving layers of image %q", img.InputName)
- }
-
- // Create an imageInfo and fill the image and layer info
- imageInfo := &infoImage{
- id: img.ID(),
- tags: img.Names(),
- }
-
+func printTree(imageInfo *image.InfoImage, layerInfoMap map[string]*image.LayerInfo, img *adapter.ContainerImage, whatRequires bool) error {
size, err := img.Size(context.Background())
if err != nil {
- return errors.Wrapf(err, "error while retriving image size")
+ return err
}
- fmt.Printf("Image ID: %s\n", imageInfo.id[:12])
- fmt.Printf("Tags:\t %s\n", imageInfo.tags)
+
+ fmt.Printf("Image ID: %s\n", imageInfo.ID[:12])
+ fmt.Printf("Tags:\t %s\n", imageInfo.Tags)
fmt.Printf("Size:\t %v\n", units.HumanSizeWithPrecision(float64(*size), 4))
fmt.Printf(fmt.Sprintf("Image Layers\n"))
- if !c.WhatRequires {
+ if !whatRequires {
// fill imageInfo with layers associated with image.
// the layers will be filled such that
// (Start)RootLayer->...intermediate Parent Layer(s)-> TopLayer(End)
- err := buildImageHierarchyMap(imageInfo, layerInfoMap, img.TopLayer())
- if err != nil {
- return err
- }
// Build output from imageInfo into buffer
printImageHierarchy(imageInfo)
@@ -108,30 +85,8 @@ func treeCmd(c *cliconfig.TreeValues) error {
// the layers will be filled such that
// (Start)TopLayer->...intermediate Child Layer(s)-> Child TopLayer(End)
// (Forks)... intermediate Child Layer(s) -> Child Top Layer(End)
- err := printImageChildren(layerInfoMap, img.TopLayer(), "", true)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Stores hierarchy of images such that all parent layers using which image is built are stored in imageInfo
-// Layers are added such that (Start)RootLayer->...intermediate Parent Layer(s)-> TopLayer(End)
-func buildImageHierarchyMap(imageInfo *infoImage, layerMap map[string]*image.LayerInfo, layerID string) error {
- if layerID == "" {
- return nil
- }
- ll, ok := layerMap[layerID]
- if !ok {
- return fmt.Errorf("lookup error: layerid %s not found", layerID)
+ return printImageChildren(layerInfoMap, img.TopLayer(), "", true)
}
- if err := buildImageHierarchyMap(imageInfo, layerMap, ll.ParentID); err != nil {
- return err
- }
-
- imageInfo.layers = append(imageInfo.layers, *ll)
return nil
}
@@ -175,14 +130,14 @@ func printImageChildren(layerMap map[string]*image.LayerInfo, layerID string, pr
}
// prints the layers info of image
-func printImageHierarchy(imageInfo *infoImage) {
- for count, l := range imageInfo.layers {
+func printImageHierarchy(imageInfo *image.InfoImage) {
+ for count, l := range imageInfo.Layers {
var tags string
intend := middleItem
if len(l.RepoTags) > 0 {
tags = fmt.Sprintf(" Top Layer of: %s", l.RepoTags)
}
- if count == len(imageInfo.layers)-1 {
+ if count == len(imageInfo.Layers)-1 {
intend = lastItem
}
fmt.Printf("%s ID: %s Size: %7v%s\n", intend, l.ID[:12], units.HumanSizeWithPrecision(float64(l.Size), 4), tags)
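For reference, the tree output assembled by printTree and printImageHierarchy looks roughly like the following (IDs, sizes, and tags invented for illustration; the branch glyphs correspond to the middleItem/lastItem constants):

```console
$ podman tree fedora
Image ID: ea2fd22297dc
Tags:	 [docker.io/library/fedora:latest]
Size:	 278.7MB
Image Layers
├── ID: 1dd88e463f29 Size:  107MB
└── ID: b6a508abe1d9 Size:  171MB Top Layer of: [docker.io/library/fedora:latest]
```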
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index 2ff06a6f6..92fdcd20f 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -68,7 +68,8 @@ type Image (
virtualSize: int,
containers: int,
labels: [string]string,
- isParent: bool
+ isParent: bool,
+ topLayer: string
)
# ImageHistory describes the returned structure from ImageHistory.
@@ -1161,8 +1162,15 @@ method LoadImage(name: string, inputFile: string, quiet: bool, deleteFile: bool)
# GetEvents returns known libpod events filtered by the options provided.
method GetEvents(filter: []string, since: string, until: string) -> (events: Event)
+# Diff returns a diff between libpod objects
method Diff(name: string) -> (diffs: []DiffInfo)
+# GetLayersMapWithImageInfo is for the development of Podman and should not be used.
+method GetLayersMapWithImageInfo() -> (layerMap: string)
+
+# BuildImageHierarchyMap is for the development of Podman and should not be used.
+method BuildImageHierarchyMap(name: string) -> (imageInfo: string)
+
# ImageNotFound means the image could not be found by the provided name or ID in local storage.
error ImageNotFound (id: string, reason: string)
diff --git a/docs/podman-volume-rm.1.md b/docs/podman-volume-rm.1.md
index c23d7675c..8c3765235 100644
--- a/docs/podman-volume-rm.1.md
+++ b/docs/podman-volume-rm.1.md
@@ -21,7 +21,8 @@ Remove all volumes.
**-f**, **--force**=""
-Remove a volume by force, even if it is being used by a container
+Remove a volume by force.
+If it is being used by containers, the containers will be removed first.
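+For example, assuming a volume named `myvol` that is still in use by a container:
+```
+$ podman volume rm --force myvol
+```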
**--help**
diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md
index bfff90016..2abd9c50f 100644
--- a/docs/tutorials/podman_tutorial.md
+++ b/docs/tutorials/podman_tutorial.md
@@ -123,13 +123,14 @@ sudo make install PREFIX=/usr
This sample container will run a very basic httpd server that serves only its index
page.
```console
-podman run -dt -e HTTPD_VAR_RUN=/var/run/httpd -e HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \
+podman run -dt -p 8080:8080/tcp -e HTTPD_VAR_RUN=/var/run/httpd -e HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \
-e HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \
-e HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ \
registry.fedoraproject.org/f27/httpd /usr/bin/run-httpd
```
Because the container is being run in detached mode, represented by the *-d* in the podman run command, podman
-will print the container ID after it has run.
+will print the container ID after it has run. Note that we use port forwarding to be able to
+access the HTTP server. Rootless port forwarding requires slirp4netns v0.3.0 or later.
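+To verify that the forwarded port is reachable (assuming the container started successfully):
+```console
+curl http://localhost:8080
+```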
### Listing running containers
The Podman *ps* command is used to list created and running containers.
@@ -140,10 +141,11 @@ podman ps
Note: If you add *-a* to the *ps* command, Podman will show all containers.
### Inspecting a running container
You can "inspect" a running container for metadata and details about itself. We can even use
-the inspect subcommand to see what IP address was assigned to the container.
+the inspect subcommand to see what IP address was assigned to the container. As the container is running in rootless mode, an IP address is not assigned and the value will be empty in the output from inspect.
```console
-$ sudo podman inspect -l | grep IPAddress\":
- "IPAddress": "10.88.6.140",
+$ podman inspect -l | grep IPAddress\":
+ "SecondaryIPAddresses": null,
+ "IPAddress": "",
```
Note: The -l is a convenience argument for **latest container**. You can also use the container's ID instead
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 92a7b1538..d8cfa2bda 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1358,56 +1358,6 @@ func (s *BoltState) AddVolume(volume *Volume) error {
return err
}
-// RemoveVolCtrDep updates the container dependencies sub bucket of the given volume.
-// It deletes it from the bucket when found.
-// This is important when force removing a volume and we want to get rid of the dependencies.
-func (s *BoltState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
- if ctrID == "" {
- return nil
- }
-
- if !s.valid {
- return ErrDBBadConfig
- }
-
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
- volName := []byte(volume.Name())
-
- db, err := s.getDBCon()
- if err != nil {
- return err
- }
- defer s.closeDBCon(db)
-
- err = db.Update(func(tx *bolt.Tx) error {
- volBkt, err := getVolBucket(tx)
- if err != nil {
- return err
- }
-
- volDB := volBkt.Bucket(volName)
- if volDB == nil {
- volume.valid = false
- return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
- }
-
- // Make a subbucket for the containers using the volume
- ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
- depCtrID := []byte(ctrID)
- if depExists := ctrDepsBkt.Get(depCtrID); depExists != nil {
- if err := ctrDepsBkt.Delete(depCtrID); err != nil {
- return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctrID, volume.Name())
- }
- }
-
- return nil
- })
- return err
-}
-
// RemoveVolume removes the given volume from the state
func (s *BoltState) RemoveVolume(volume *Volume) error {
if !s.valid {
@@ -1433,6 +1383,11 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return err
}
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Check if the volume exists
volDB := volBkt.Bucket(volName)
if volDB == nil {
@@ -1448,6 +1403,18 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
if volCtrsBkt != nil {
var deps []string
err = volCtrsBkt.ForEach(func(id, value []byte) error {
+ // Alright, this is ugly.
+ // But we need it to work around the change in
+ // volume dependency handling, to make sure that
+ // older Podman versions don't cause DB
+ // corruption.
+ // Look up all dependencies and see that they
+ // still exist before appending.
+ ctrExists := ctrBkt.Bucket(id)
+ if ctrExists == nil {
+ return nil
+ }
+
deps = append(deps, string(id))
return nil
})
@@ -1629,6 +1596,11 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
return err
}
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
volDB := volBucket.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
@@ -1642,6 +1614,13 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
// Iterate through and add dependencies
err = dependsBkt.ForEach(func(id, value []byte) error {
+ // Look up all dependencies and see that they
+ // still exist before appending.
+ ctrExists := ctrBucket.Bucket(id)
+ if ctrExists == nil {
+ return nil
+ }
+
depCtrs = append(depCtrs, string(id))
return nil
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index b6a0759b1..a6900a6d3 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -564,23 +564,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
}
- // Add container to volume dependencies bucket if container is using a named volume
- if ctr.runtime.config.VolumePath == "" {
- return nil
- }
- for _, vol := range ctr.config.Spec.Mounts {
- if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
- volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
- volDB := volBkt.Bucket([]byte(volName))
- if volDB == nil {
- return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volName)
- }
+ // Add container to named volume dependencies buckets
+ for _, vol := range ctr.config.NamedVolumes {
+ volDB := volBkt.Bucket([]byte(vol.Name))
+ if volDB == nil {
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
+ }
- ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
- if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
- if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
- return errors.Wrapf(err, "error storing container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
- }
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
+ if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
}
}
}
@@ -745,22 +739,19 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
- // Remove container from volume dependencies bucket if container is using a named volume
- for _, vol := range ctr.config.Spec.Mounts {
- if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
- volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
-
- volDB := volBkt.Bucket([]byte(volName))
- if volDB == nil {
- // Let's assume the volume was already deleted and continue to remove the container
- continue
- }
+ // Remove container from named volume dependencies buckets
+ for _, vol := range ctr.config.NamedVolumes {
+ volDB := volBkt.Bucket([]byte(vol.Name))
+ if volDB == nil {
+ // Let's assume the volume was already deleted and
+ // continue to remove the container
+ continue
+ }
- ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
- if depExists := ctrDepsBkt.Get(ctrID); depExists != nil {
- if err := ctrDepsBkt.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
- }
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
+ if err := ctrDepsBkt.Delete(ctrID); err != nil {
+ return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
}
}
}
diff --git a/libpod/container.go b/libpod/container.go
index 739406e42..6d5e063ab 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -234,6 +234,8 @@ type ContainerConfig struct {
// These include the SHM mount.
// These must be unmounted before the container's rootfs is unmounted.
Mounts []string `json:"mounts,omitempty"`
+ // NamedVolumes lists the named volumes to mount into the container.
+ NamedVolumes []*ContainerNamedVolume `json:"namedVolumes,omitempty"`
// Security Config
@@ -354,9 +356,6 @@ type ContainerConfig struct {
// ExitCommand is the container's exit command.
// This Command will be executed when the container exits
ExitCommand []string `json:"exitCommand,omitempty"`
- // LocalVolumes are the built-in volumes we get from the --volumes-from flag
- // It picks up the built-in volumes of the container used by --volumes-from
- LocalVolumes []spec.Mount
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
@@ -368,6 +367,18 @@ type ContainerConfig struct {
HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"`
}
+// ContainerNamedVolume is a named volume that will be mounted into the
+// container. Each named volume is a libpod Volume present in the state.
+type ContainerNamedVolume struct {
+ // Name is the name of the volume to mount in.
+ // Must resolve to a valid volume present in this Podman.
+ Name string `json:"volumeName"`
+ // Dest is the mount's destination
+ Dest string `json:"dest"`
+ // Options are fstab style mount options
+ Options []string `json:"options,omitempty"`
+}
+
// ContainerStatus returns a string representation for users
// of a container state
func (t ContainerStatus) String() string {
@@ -488,6 +499,22 @@ func (c *Container) StaticDir() string {
return c.config.StaticDir
}
+// NamedVolumes returns the container's named volumes.
+// The name of each is guaranteed to point to a valid libpod Volume present in
+// the state.
+func (c *Container) NamedVolumes() []*ContainerNamedVolume {
+ volumes := []*ContainerNamedVolume{}
+ for _, vol := range c.config.NamedVolumes {
+ newVol := new(ContainerNamedVolume)
+ newVol.Name = vol.Name
+ newVol.Dest = vol.Dest
+ newVol.Options = vol.Options
+ volumes = append(volumes, newVol)
+ }
+
+ return volumes
+}
+
// Privileged returns whether the container is privileged
func (c *Container) Privileged() bool {
return c.config.Privileged
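Together with the WithNamedVolumes option added to libpod/options.go below, callers can declare volumes explicitly at creation time instead of encoding them in mount paths. A minimal sketch, assuming libpod's public NewContainer entry point (volume name, destination, and options hypothetical):

```go
package main

import (
	"context"

	"github.com/containers/libpod/libpod"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// createWithVolume sketches creating a container with one named volume.
// If "mydata" does not exist yet, the runtime creates it with basic
// options (see the creation loop in runtime_ctr.go below).
func createWithVolume(ctx context.Context, r *libpod.Runtime, s *spec.Spec) (*libpod.Container, error) {
	vol := &libpod.ContainerNamedVolume{
		Name:    "mydata",       // hypothetical volume name
		Dest:    "/data",        // mount destination inside the container
		Options: []string{"rw"}, // fstab-style mount options
	}
	return r.NewContainer(ctx, s, libpod.WithNamedVolumes([]*libpod.ContainerNamedVolume{vol}))
}
```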
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index daa32007a..22df36c11 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1403,22 +1403,6 @@ func getExcludedCGroups() (excludes []string) {
return
}
-// namedVolumes returns named volumes for the container
-func (c *Container) namedVolumes() ([]string, error) {
- var volumes []string
- for _, vol := range c.config.Spec.Mounts {
- if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
- volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
- split := strings.Split(volume, "/")
- volume = split[0]
- if _, err := c.runtime.state.Volume(volume); err == nil {
- volumes = append(volumes, volume)
- }
- }
- }
- return volumes, nil
-}
-
// this should be from chrootarchive.
func (c *Container) copyWithTarFromImage(src, dest string) error {
mountpoint, err := c.mount()
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 504d6c135..4d6bf61a3 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -195,6 +195,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err := c.makeBindMounts(); err != nil {
return nil, err
}
+
// Check if the spec file mounts contain the label Relabel flags z or Z.
// If they do, relabel the source directory and then remove the option.
for i := range g.Config.Mounts {
@@ -218,6 +219,23 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
g.SetProcessSelinuxLabel(c.ProcessLabel())
g.SetLinuxMountLabel(c.MountLabel())
+
+ // Add named volumes
+ for _, namedVol := range c.config.NamedVolumes {
+ volume, err := c.runtime.GetVolume(namedVol.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
+ }
+ mountPoint := volume.MountPoint()
+ volMount := spec.Mount{
+ Type: "bind",
+ Source: mountPoint,
+ Destination: namedVol.Dest,
+ Options: namedVol.Options,
+ }
+ g.AddMount(volMount)
+ }
+
// Add bind mounts to container
for dstPath, srcPath := range c.state.BindMounts {
newMount := spec.Mount{
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 4862bf1d6..cc056b816 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -68,6 +68,16 @@ type Runtime struct {
EventsLogFilePath string
}
+// InfoImage keeps information about an image along with all associated layers
+type InfoImage struct {
+ // ID of image
+ ID string
+ // Tags of image
+ Tags []string
+ // Layers stores all layers of image.
+ Layers []LayerInfo
+}
+
// ErrRepoTagNotFound is the error returned when the image id given doesn't match a repo tag in store
var ErrRepoTagNotFound = errors.New("unable to match user input to any specific repotag")
@@ -1277,3 +1287,21 @@ func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, er
}
return layerInfoMap, nil
}
+
+// BuildImageHierarchyMap stores the hierarchy of an image such that all parent layers used to build it are stored in imageInfo
+// Layers are added such that (Start)RootLayer->...intermediate Parent Layer(s)-> TopLayer(End)
+func BuildImageHierarchyMap(imageInfo *InfoImage, layerMap map[string]*LayerInfo, layerID string) error {
+ if layerID == "" {
+ return nil
+ }
+ ll, ok := layerMap[layerID]
+ if !ok {
+ return fmt.Errorf("lookup error: layerid %s not found", layerID)
+ }
+ if err := BuildImageHierarchyMap(imageInfo, layerMap, ll.ParentID); err != nil {
+ return err
+ }
+
+ imageInfo.Layers = append(imageInfo.Layers, *ll)
+ return nil
+}
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index ab4fc8ba7..2669206df 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -249,11 +249,8 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
}
// Add container to volume dependencies
- for _, vol := range ctr.config.Spec.Mounts {
- if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
- volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
- s.addCtrToVolDependsMap(ctr.ID(), volName)
- }
+ for _, vol := range ctr.config.NamedVolumes {
+ s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
}
return nil
@@ -306,12 +303,9 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}
- // Remove container from volume dependencies
- for _, vol := range ctr.config.Spec.Mounts {
- if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
- volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
- s.removeCtrFromVolDependsMap(ctr.ID(), volName)
- }
+ // Remove this container from volume dependencies
+ for _, vol := range ctr.config.NamedVolumes {
+ s.removeCtrFromVolDependsMap(ctr.ID(), vol.Name)
}
return nil
@@ -492,22 +486,6 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error {
return nil
}
-// RemoveVolCtrDep updates the container dependencies of the volume
-func (s *InMemoryState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
- if !volume.valid {
- return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
- }
-
- if _, ok := s.volumes[volume.Name()]; !ok {
- return errors.Wrapf(ErrNoSuchVolume, "volume with name %s doesn't exists in state", volume.Name())
- }
-
- // Remove container that is using this volume
- s.removeCtrFromVolDependsMap(ctrID, volume.Name())
-
- return nil
-}
-
// VolumeInUse checks if the given volume is being used by at least one container
func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
if !volume.valid {
diff --git a/libpod/options.go b/libpod/options.go
index 24f126e66..9326e54e4 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
- spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -1111,24 +1110,6 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
}
}
-// WithLocalVolumes sets the built-in volumes of the container retrieved
-// from a container passed in to the --volumes-from flag.
-// This stores the built-in volume information in the Config so we can
-// add them when creating the container.
-func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
- return func(ctr *Container) error {
- if ctr.valid {
- return ErrCtrFinalized
- }
-
- if volumes != nil {
- ctr.config.LocalVolumes = append(ctr.config.LocalVolumes, volumes...)
- }
-
- return nil
- }
-}
-
// WithEntrypoint sets the entrypoint of the container.
// This is not used to change the container's spec, but will instead be used
// during commit to populate the entrypoint of the new image.
@@ -1255,6 +1236,35 @@ func withIsInfra() CtrCreateOption {
}
}
+// WithNamedVolumes adds the given named volumes to the container.
+func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return ErrCtrFinalized
+ }
+
+ destinations := make(map[string]bool)
+
+ for _, vol := range volumes {
+ // Don't check if they already exist.
+ // If they don't we will automatically create them.
+
+ if _, ok := destinations[vol.Dest]; ok {
+ return errors.Wrapf(ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ }
+ destinations[vol.Dest] = true
+
+ ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
+ Name: vol.Name,
+ Dest: vol.Dest,
+ Options: vol.Options,
+ })
+ }
+
+ return nil
+ }
+}
+
// Volume Creation Options
// WithVolumeName sets the name of the volume.
@@ -1274,68 +1284,72 @@ func WithVolumeName(name string) VolumeCreateOption {
}
}
-// WithVolumeUID sets the uid of the owner.
-func WithVolumeUID(uid int) VolumeCreateOption {
+// WithVolumeLabels sets the labels of the volume.
+func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
- volume.config.UID = uid
+
+ volume.config.Labels = make(map[string]string)
+ for key, value := range labels {
+ volume.config.Labels[key] = value
+ }
+
return nil
}
}
-// WithVolumeGID sets the gid of the owner.
-func WithVolumeGID(gid int) VolumeCreateOption {
+// WithVolumeDriver sets the driver of the volume.
+func WithVolumeDriver(driver string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
- volume.config.GID = gid
+
+ volume.config.Driver = driver
+
return nil
}
}
-// WithVolumeLabels sets the labels of the volume.
-func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
+// WithVolumeOptions sets the options of the volume.
+func WithVolumeOptions(options map[string]string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
- volume.config.Labels = make(map[string]string)
- for key, value := range labels {
- volume.config.Labels[key] = value
+ volume.config.Options = make(map[string]string)
+ for key, value := range options {
+ volume.config.Options[key] = value
}
return nil
}
}
-// WithVolumeDriver sets the driver of the volume.
-func WithVolumeDriver(driver string) VolumeCreateOption {
+// WithVolumeUID sets the UID that the volume will be created as.
+func WithVolumeUID(uid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
- volume.config.Driver = driver
+ volume.config.UID = uid
return nil
}
}
-// WithVolumeOptions sets the options of the volume.
-func WithVolumeOptions(options map[string]string) VolumeCreateOption {
+// WithVolumeGID sets the GID that the volume will be created as.
+func WithVolumeGID(gid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
- volume.config.Options = make(map[string]string)
- for key, value := range options {
- volume.config.Options[key] = value
- }
+ volume.config.GID = gid
return nil
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index da2399685..800b42851 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -99,9 +99,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.state.State = ContainerStateConfigured
ctr.runtime = r
- ctr.valid = true
- ctr.state.State = ContainerStateConfigured
-
var pod *Pod
if ctr.config.Pod != "" {
// Get the pod from state
@@ -173,24 +170,29 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.config.ConmonPidFile = filepath.Join(ctr.config.StaticDir, "conmon.pid")
}
- // Go through the volume mounts and check for named volumes
- // If the named volme already exists continue, otherwise create
- // the storage for the named volume.
- for i, vol := range ctr.config.Spec.Mounts {
- if vol.Source[0] != '/' && isNamedVolume(vol.Source) {
- volInfo, err := r.state.Volume(vol.Source)
- if err != nil {
- newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source), withSetCtrSpecific(), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
- if err != nil {
- return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
- }
- ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
- if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
- return nil, errors.Wrapf(err, "failed to copy content into new volume mount %q", vol.Source)
- }
- continue
- }
- ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
+ // Go through named volumes and add them.
+ // If they don't exist they will be created using basic options.
+ for _, vol := range ctr.config.NamedVolumes {
+ // Check if it exists already
+ _, err := r.state.Volume(vol.Name)
+ if err == nil {
+ // The volume exists, we're good
+ continue
+ } else if errors.Cause(err) != ErrNoSuchVolume {
+ return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+ }
+
+ logrus.Debugf("Creating new volume %s for container", vol.Name)
+
+ // The volume does not exist, so we need to create it.
+ newVol, err := r.newVolume(ctx, WithVolumeName(vol.Name), withSetCtrSpecific(),
+ WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
+ }
+
+ if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name)
}
}
@@ -344,13 +346,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}
- var volumes []string
- if removeVolume {
- volumes, err = c.namedVolumes()
- if err != nil {
- logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
- }
- }
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {
@@ -415,8 +410,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
- for _, v := range volumes {
- if volume, err := runtime.state.Volume(v); err == nil {
+ if !removeVolume {
+ return cleanupErr
+ }
+
+ for _, v := range c.config.NamedVolumes {
+ if volume, err := runtime.state.Volume(v.Name); err == nil {
if !volume.IsCtrSpecific() {
continue
}
@@ -548,14 +547,6 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
return ctrs[lastCreatedIndex], nil
}
-// Check if volName is a named volume and not one of the default mounts we add to containers
-func isNamedVolume(volName string) bool {
- if volName != "proc" && volName != "tmpfs" && volName != "devpts" && volName != "shm" && volName != "mqueue" && volName != "sysfs" && volName != "cgroup" {
- return true
- }
- return false
-}
-
// Export is the libpod portion of exporting a container to a tar file
func (r *Runtime) Export(name string, path string) error {
ctr, err := r.LookupContainer(name)
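The practical effect of the creation loop above: referencing a named volume that does not yet exist creates it with basic options. A hypothetical session (output abridged):

```console
$ podman run -v mydata:/data alpine touch /data/hello
$ podman volume ls
DRIVER   VOLUME NAME
local    mydata
```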
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index db5c29242..40040fc52 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -98,12 +98,26 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
}
- // If using force, log the warning that the volume is being used by at least one container
- logrus.Warnf("volume %s is being used by the following container(s): %s", v.Name(), depsStr)
- // Remove the container dependencies so we can go ahead and delete the volume
+
+ // We need to remove all containers using the volume
for _, dep := range deps {
- if err := r.state.RemoveVolCtrDep(v, dep); err != nil {
- return errors.Wrapf(err, "unable to remove container dependency %q from volume %q while trying to delete volume by force", dep, v.Name())
+ ctr, err := r.state.Container(dep)
+ if err != nil {
+ // If the container's removed, no point in
+ // erroring.
+ if errors.Cause(err) == ErrNoSuchCtr || errors.Cause(err) == ErrCtrRemoved {
+ continue
+ }
+
+ return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
+ }
+
+ // TODO: do we want to set force here when removing
+ // containers?
+ // I'm inclined to say no, in case someone accidentally
+ // wipes a container they're using...
+ if err := r.removeContainer(ctx, ctr, false, false); err != nil {
+ return errors.Wrapf(err, "error removing container %s that depends on volume %s", ctr.ID(), v.Name())
}
}
}
diff --git a/libpod/state.go b/libpod/state.go
index 4296fc3cd..d0ad1a1f8 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -192,10 +192,6 @@ type State interface {
// AddVolume adds the specified volume to state. The volume's name
// must be unique within the list of existing volumes
AddVolume(volume *Volume) error
- // RemoveVolCtrDep updates the list of container dependencies that the
- // volume has. It either deletes the dependent container ID from
- // the sub-bucket
- RemoveVolCtrDep(volume *Volume, ctrID string) error
// RemoveVolume removes the specified volume.
// Only volumes that have no container dependencies can be removed
RemoveVolume(volume *Volume) error
diff --git a/pkg/adapter/images.go b/pkg/adapter/images.go
new file mode 100644
index 000000000..c8ea1cdea
--- /dev/null
+++ b/pkg/adapter/images.go
@@ -0,0 +1,34 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/pkg/errors"
+)
+
+// Tree ...
+func (r *LocalRuntime) Tree(c *cliconfig.TreeValues) (*image.InfoImage, map[string]*image.LayerInfo, *ContainerImage, error) {
+ img, err := r.NewImageFromLocal(c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Fetch map of image-layers, which is used for printing output.
+ layerInfoMap, err := image.GetLayersMapWithImageInfo(r.Runtime.ImageRuntime())
+ if err != nil {
+ return nil, nil, nil, errors.Wrapf(err, "error while retrieving layers of image %q", img.InputName)
+ }
+
+ // Create an imageInfo and fill the image and layer info
+ imageInfo := &image.InfoImage{
+ ID: img.ID(),
+ Tags: img.Names(),
+ }
+
+ if err := image.BuildImageHierarchyMap(imageInfo, layerInfoMap, img.TopLayer()); err != nil {
+ return nil, nil, nil, err
+ }
+ return imageInfo, layerInfoMap, img, nil
+}
diff --git a/pkg/adapter/images_remote.go b/pkg/adapter/images_remote.go
index e7b38dccc..722058d4a 100644
--- a/pkg/adapter/images_remote.go
+++ b/pkg/adapter/images_remote.go
@@ -6,8 +6,11 @@ import (
"context"
"encoding/json"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/inspect"
+ "github.com/pkg/errors"
)
// Inspect returns an ImageData struct obtained over a varlink connection
@@ -22,3 +25,32 @@ func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error
}
return &data, nil
}
+
+// Tree ...
+func (r *LocalRuntime) Tree(c *cliconfig.TreeValues) (*image.InfoImage, map[string]*image.LayerInfo, *ContainerImage, error) {
+ layerInfoMap := make(map[string]*image.LayerInfo)
+ imageInfo := &image.InfoImage{}
+
+ img, err := r.NewImageFromLocal(c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ reply, err := iopodman.GetLayersMapWithImageInfo().Call(r.Conn)
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to obtain image layers")
+ }
+ if err := json.Unmarshal([]byte(reply), &layerInfoMap); err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to unmarshal image layers")
+ }
+
+ reply, err = iopodman.BuildImageHierarchyMap().Call(r.Conn, c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to get build image map")
+ }
+ if err := json.Unmarshal([]byte(reply), imageInfo); err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to unmarshal build image map")
+ }
+
+ return imageInfo, layerInfoMap, img, nil
+}
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 978c9ffd8..807a9ad8f 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -82,6 +82,7 @@ type remoteImage struct {
Digest digest.Digest
isParent bool
Runtime *LocalRuntime
+ TopLayer string
}
// Container ...
@@ -147,6 +148,7 @@ func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRu
Names: i.RepoTags,
isParent: i.IsParent,
Runtime: runtime,
+ TopLayer: i.TopLayer,
}
return &ContainerImage{ri}, nil
}
@@ -280,6 +282,11 @@ func (ci *ContainerImage) Dangling() bool {
return len(ci.Names()) == 0
}
+// TopLayer returns an images top layer as a string
+func (ci *ContainerImage) TopLayer() string {
+ return ci.remoteImage.TopLayer
+}
+
// TagImage ...
func (ci *ContainerImage) TagImage(tag string) error {
_, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go
index 0c1ef312a..50af95915 100644
--- a/pkg/registrar/registrar_test.go
+++ b/pkg/registrar/registrar_test.go
@@ -1,119 +1,213 @@
-package registrar
+package registrar_test
import (
- "reflect"
"testing"
-)
-
-func TestReserve(t *testing.T) {
- r := NewRegistrar()
-
- obj := "test1"
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-
- obj2 := "test2"
- err := r.Reserve("test", obj2)
- if err == nil {
- t.Fatalf("expected error when reserving an already reserved name to another object")
- }
- if err != ErrNameReserved {
- t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name")
- }
-}
-
-func TestRelease(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
-
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
- r.Release("test")
- r.Release("test") // Ensure there is no panic here
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestGetNames(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- names := []string{"test1", "test2"}
-
- for _, name := range names {
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
- }
- r.Reserve("test3", "other")
-
- names2, err := r.GetNames(obj)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(names, names2) {
- t.Fatalf("Exepected: %v, Got: %v", names, names2)
- }
-}
+ "github.com/containers/libpod/pkg/registrar"
+ . "github.com/containers/libpod/test/framework"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
-func TestDelete(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- names := []string{"test1", "test2"}
- for _, name := range names {
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
- }
-
- r.Reserve("test3", "other")
- r.Delete(obj)
-
- _, err := r.GetNames(obj)
- if err == nil {
- t.Fatal("expected error getting names for deleted key")
- }
-
- if err != ErrNoSuchKey {
- t.Fatal("expected `ErrNoSuchKey`")
- }
+// TestRegistrar runs the created specs
+func TestRegistrar(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Registrar")
}
-func TestGet(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- name := "test"
-
- _, err := r.Get(name)
- if err == nil {
- t.Fatal("expected error when key does not exist")
- }
- if err != ErrNameNotReserved {
- t.Fatal(err)
- }
-
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
-
- if _, err = r.Get(name); err != nil {
- t.Fatal(err)
- }
-
- r.Delete(obj)
- _, err = r.Get(name)
- if err == nil {
- t.Fatal("expected error when key does not exist")
- }
- if err != ErrNameNotReserved {
- t.Fatal(err)
- }
-}
+// nolint: gochecknoglobals
+var t *TestFramework
+
+var _ = BeforeSuite(func() {
+ t = NewTestFramework(NilFunc, NilFunc)
+ t.Setup()
+})
+
+var _ = AfterSuite(func() {
+ t.Teardown()
+})
+
+// The actual test suite
+var _ = t.Describe("Registrar", func() {
+ // Constant test data needed by some tests
+ const (
+ testKey = "testKey"
+ testName = "testName"
+ anotherKey = "anotherKey"
+ )
+
+ // The system under test
+ var sut *registrar.Registrar
+
+ // Prepare the system under test and register a test name and key before
+ // each test
+ BeforeEach(func() {
+ sut = registrar.NewRegistrar()
+ Expect(sut.Reserve(testName, testKey)).To(BeNil())
+ })
+
+ t.Describe("Reserve", func() {
+ It("should succeed to reserve a new registrar", func() {
+ // Given
+ // When
+ err := sut.Reserve("name", "key")
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+
+ It("should succeed to reserve a registrar twice", func() {
+ // Given
+ // When
+ err := sut.Reserve(testName, testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+
+ It("should fail to reserve an already reserved registrar", func() {
+ // Given
+ // When
+ err := sut.Reserve(testName, anotherKey)
+
+ // Then
+ Expect(err).NotTo(BeNil())
+ Expect(err).To(Equal(registrar.ErrNameReserved))
+ })
+ })
+
+ t.Describe("Release", func() {
+ It("should succeed to release a registered registrar multiple times", func() {
+ // Given
+ // When
+ // Then
+ sut.Release(testName)
+ sut.Release(testName)
+ })
+
+ It("should succeed to release a unknown registrar multiple times", func() {
+ // Given
+ // When
+ // Then
+ sut.Release(anotherKey)
+ sut.Release(anotherKey)
+ })
+
+ It("should succeed to release and re-register a registrar", func() {
+ // Given
+ // When
+ sut.Release(testName)
+ err := sut.Reserve(testName, testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+ })
+
+ t.Describe("GetNames", func() {
+ It("should succeed to retrieve a single name for a registrar", func() {
+ // Given
+ // When
+ names, err := sut.GetNames(testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(1))
+ Expect(names[0]).To(Equal(testName))
+ })
+
+ It("should succeed to retrieve all names for a registrar", func() {
+ // Given
+ testNames := []string{"test1", "test2"}
+ for _, name := range testNames {
+ Expect(sut.Reserve(name, anotherKey)).To(BeNil())
+ }
+
+ // When
+ names, err := sut.GetNames(anotherKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(2))
+ Expect(names).To(Equal(testNames))
+ })
+ })
+
+ t.Describe("GetNames", func() {
+ It("should succeed to retrieve a single name for a registrar", func() {
+ // Given
+ // When
+ names, err := sut.GetNames(testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(1))
+ Expect(names[0]).To(Equal(testName))
+ })
+
+ It("should succeed to retrieve all names for a registrar", func() {
+ // Given
+ anotherKey := "anotherKey"
+ testNames := []string{"test1", "test2"}
+ for _, name := range testNames {
+ Expect(sut.Reserve(name, anotherKey)).To(BeNil())
+ }
+
+ // When
+ names, err := sut.GetNames(anotherKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(2))
+ Expect(names).To(Equal(testNames))
+ })
+ })
+
+ t.Describe("Delete", func() {
+ It("should succeed to delete a registrar", func() {
+ // Given
+ // When
+ sut.Delete(testKey)
+
+ // Then
+ names, err := sut.GetNames(testKey)
+ Expect(len(names)).To(BeZero())
+ Expect(err).To(Equal(registrar.ErrNoSuchKey))
+ })
+ })
+
+ t.Describe("Get", func() {
+ It("should succeed to get a key for a registrar", func() {
+ // Given
+ // When
+ key, err := sut.Get(testName)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(key).To(Equal(testKey))
+ })
+
+ It("should fail to get a key for a not existing registrar", func() {
+ // Given
+ // When
+ key, err := sut.Get("notExistingName")
+
+ // Then
+ Expect(key).To(BeEmpty())
+ Expect(err).To(Equal(registrar.ErrNameNotReserved))
+ })
+ })
+
+ t.Describe("GetAll", func() {
+ It("should succeed to get all names", func() {
+ // Given
+ // When
+ names := sut.GetAll()
+
+ // Then
+ Expect(len(names)).To(Equal(1))
+ Expect(len(names[testKey])).To(Equal(1))
+ Expect(names[testKey][0]).To(Equal(testName))
+ })
+ })
+})
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index a433fc16d..e71d9d3db 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -1,7 +1,6 @@
package createconfig
import (
- "encoding/json"
"fmt"
"net"
"os"
@@ -23,18 +22,16 @@ import (
"golang.org/x/sys/unix"
)
-type mountType string
-
// Type constants
const (
bps = iota
iops
// TypeBind is the type for mounting host dir
- TypeBind mountType = "bind"
+ TypeBind = "bind"
// TypeVolume is the type for remote storage volumes
- // TypeVolume mountType = "volume" // re-enable upon use
+ // TypeVolume = "volume" // re-enable upon use
// TypeTmpfs is the type for mounting tmpfs
- TypeTmpfs mountType = "tmpfs"
+ TypeTmpfs = "tmpfs"
)
// CreateResourceConfig represents resource elements in CreateConfig
@@ -130,15 +127,15 @@ type CreateConfig struct {
Mounts []spec.Mount //mounts
Volumes []string //volume
VolumesFrom []string
- WorkDir string //workdir
- LabelOpts []string //SecurityOpts
- NoNewPrivs bool //SecurityOpts
- ApparmorProfile string //SecurityOpts
- SeccompProfilePath string //SecurityOpts
+ NamedVolumes []*libpod.ContainerNamedVolume // Filled in by CreateConfigToOCISpec
+ WorkDir string //workdir
+ LabelOpts []string //SecurityOpts
+ NoNewPrivs bool //SecurityOpts
+ ApparmorProfile string //SecurityOpts
+ SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
- LocalVolumes []spec.Mount //Keeps track of the built-in volumes of container used in the --volumes-from flag
- Syslog bool // Whether to enable syslog on exit commands
+ Syslog bool // Whether to enable syslog on exit commands
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -172,9 +169,9 @@ func (c *CreateConfig) AddContainerInitBinary(path string) error {
c.Command = append([]string{"/dev/init", "--"}, c.Command...)
c.Mounts = append(c.Mounts, spec.Mount{
Destination: "/dev/init",
- Type: "bind",
+ Type: TypeBind,
Source: path,
- Options: []string{"bind", "ro"},
+ Options: []string{TypeBind, "ro"},
})
return nil
}
@@ -217,9 +214,9 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
return mounts
}
-//GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
+// GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
- m := c.LocalVolumes
+ m := []spec.Mount{}
for _, i := range c.Volumes {
var options []string
spliti := strings.Split(i, ":")
@@ -255,9 +252,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
mount.Source = "tmpfs"
mount.Options = append(mount.Options, "tmpcopyup")
} else {
+ // TODO: Move support for this and tmpfs into libpod
+ // Should tmpfs also be handled as named volumes? Wouldn't be hard
// This will cause a new local Volume to be created on your system
mount.Source = stringid.GenerateNonCryptoID()
- mount.Options = append(mount.Options, "bind")
+ mount.Options = append(mount.Options, TypeBind)
}
m = append(m, mount)
}
@@ -268,13 +267,12 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
// GetVolumesFrom reads the create-config artifact of the container to get volumes from
// and adds it to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
- var options string
-
if os.Geteuid() != 0 {
return nil
}
for _, vol := range c.VolumesFrom {
+ options := ""
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
options = splitVol[1]
@@ -283,41 +281,60 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
- inspect, err := ctr.Inspect(false)
- if err != nil {
- return errors.Wrapf(err, "error inspecting %q", splitVol[0])
- }
- var createArtifact CreateConfig
- artifact, err := ctr.GetArtifact("create-config")
- if err != nil {
- return errors.Wrapf(err, "error getting create-config artifact for %q", splitVol[0])
+
+ logrus.Debugf("Adding volumes from container %s", ctr.ID())
+
+ // Look up the container's user volumes. This gets us the
+ // destinations of all mounts the user added to the container.
+ userVolumesArr := ctr.UserVolumes()
+
+ // We're going to need to access them a lot, so convert to a map
+ // to reduce looping.
+ // We'll also use the map to indicate if we missed any volumes along the way.
+ userVolumes := make(map[string]bool)
+ for _, dest := range userVolumesArr {
+ userVolumes[dest] = false
}
- if err := json.Unmarshal(artifact, &createArtifact); err != nil {
- return err
+
+ // Now we get the container's spec and loop through its volumes
+ // and append them in if we can find them.
+ spec := ctr.Spec()
+ if spec == nil {
+ return errors.Errorf("error retrieving container %s spec", ctr.ID())
}
- for key := range createArtifact.BuiltinImgVolumes {
- for _, m := range inspect.Mounts {
- if m.Destination == key {
- c.LocalVolumes = append(c.LocalVolumes, m)
- break
+ for _, mnt := range spec.Mounts {
+ if mnt.Type != TypeBind {
+ continue
+ }
+ if _, exists := userVolumes[mnt.Destination]; exists {
+ userVolumes[mnt.Destination] = true
+ localOptions := options
+ if localOptions == "" {
+ localOptions = strings.Join(mnt.Options, ",")
}
+ c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", mnt.Source, mnt.Destination, localOptions))
}
}
- for _, i := range createArtifact.Volumes {
- // Volumes format is host-dir:ctr-dir[:options], so get the host and ctr dir
- // and add on the options given by the user to the flag.
- spliti := strings.SplitN(i, ":", 3)
- // Throw error if mounting volume from container with Z option (private label)
- // Override this by adding 'z' to options.
- if len(spliti) > 2 && strings.Contains(spliti[2], "Z") && !strings.Contains(options, "z") {
- return errors.Errorf("volume mounted with private option 'Z' in %q. Use option 'z' to mount in current container", ctr.ID())
+ // We're done with the spec mounts. Add named volumes.
+ // Add these unconditionally - none of them are automatically
+ // part of the container, as some spec mounts are.
+ namedVolumes := ctr.NamedVolumes()
+ for _, namedVol := range namedVolumes {
+ if _, exists := userVolumes[namedVol.Dest]; exists {
+ userVolumes[namedVol.Dest] = true
}
- if options == "" {
- // Mount the volumes with the default options
- c.Volumes = append(c.Volumes, createArtifact.Volumes...)
- } else {
- c.Volumes = append(c.Volumes, spliti[0]+":"+spliti[1]+":"+options)
+ localOptions := options
+ if localOptions == "" {
+ localOptions = strings.Join(namedVol.Options, ",")
+ }
+ c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, localOptions))
+ }
+
+ // Check if we missed any volumes
+ for volDest, found := range userVolumes {
+ if !found {
+ logrus.Warnf("Unable to match volume %s from container %s for volumes-from", volDest, ctr.ID())
}
}
}
@@ -417,14 +434,20 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
// others, if they are included
volumes := make([]string, 0, len(c.Volumes))
for _, vol := range c.Volumes {
- volumes = append(volumes, strings.SplitN(vol, ":", 2)[0])
+ // We always want the volume destination
+ splitVol := strings.SplitN(vol, ":", 3)
+ if len(splitVol) > 1 {
+ volumes = append(volumes, splitVol[1])
+ } else {
+ volumes = append(volumes, splitVol[0])
+ }
}
options = append(options, libpod.WithUserVolumes(volumes))
}
- if len(c.LocalVolumes) != 0 {
- options = append(options, libpod.WithLocalVolumes(c.LocalVolumes))
+ if len(c.NamedVolumes) != 0 {
+ options = append(options, libpod.WithNamedVolumes(c.NamedVolumes))
}
if len(c.Command) != 0 {
@@ -538,7 +561,7 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
options = append(options, libpod.WithPrivileged(c.Privileged))
- useImageVolumes := c.ImageVolumeType == "bind"
+ useImageVolumes := c.ImageVolumeType == TypeBind
// Gather up the options for NewContainer which consist of With... funcs
options = append(options, libpod.WithRootFSFromImage(c.ImageID, c.Image, useImageVolumes))
options = append(options, libpod.WithSecLabels(c.LabelOpts))
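Two behavioral points in this file are easy to miss. First, --volumes-from now copies volumes by reading the source container's spec and named volumes instead of unmarshalling a stored create-config artifact. Second, WithUserVolumes is now fed mount destinations rather than sources, extracted from the source:dest[:options] volume syntax. Below is a self-contained sketch of that destination extraction; volumeDest is an illustrative helper name, not a function in this diff.

package main

import (
	"fmt"
	"strings"
)

// volumeDest mirrors the logic in GetContainerCreateOptions: for
// "source:dest[:options]" take the second field, otherwise the
// whole string is already the destination.
func volumeDest(vol string) string {
	splitVol := strings.SplitN(vol, ":", 3)
	if len(splitVol) > 1 {
		return splitVol[1]
	}
	return splitVol[0]
}

func main() {
	fmt.Println(volumeDest("/host/data:/ctr/data:ro")) // /ctr/data
	fmt.Println(volumeDest("myvol:/ctr/data"))         // /ctr/data
	fmt.Println(volumeDest("/anonymous"))              // /anonymous
}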
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index a61741f73..9b6bd089e 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/mount"
pmount "github.com/containers/storage/pkg/mount"
@@ -48,6 +49,33 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
return configMount
}
+// splitNamedVolumes separates named volumes from normal host-path mounts
+func splitNamedVolumes(mounts []spec.Mount) ([]spec.Mount, []*libpod.ContainerNamedVolume) {
+ newMounts := make([]spec.Mount, 0)
+ namedVolumes := make([]*libpod.ContainerNamedVolume, 0)
+ for _, mount := range mounts {
+ // If it's not a named volume, append unconditionally
+ if mount.Type != TypeBind {
+ newMounts = append(newMounts, mount)
+ continue
+ }
+		// Mount sources that are not named volumes must be
+		// absolute or relative paths.
+ // Volume names may not begin with a non-alphanumeric character
+ // so the HasPrefix() check is safe here.
+ if strings.HasPrefix(mount.Source, "/") || strings.HasPrefix(mount.Source, ".") {
+ newMounts = append(newMounts, mount)
+ } else {
+ namedVolume := new(libpod.ContainerNamedVolume)
+ namedVolume.Name = mount.Source
+ namedVolume.Dest = mount.Destination
+ namedVolume.Options = mount.Options
+ namedVolumes = append(namedVolumes, namedVolume)
+ }
+ }
+ return newMounts, namedVolumes
+}
+
func getAvailableGids() (int64, error) {
idMap, err := user.ParseIDMapFile("/proc/self/gid_map")
if err != nil {
@@ -99,7 +127,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
}
sysMnt := spec.Mount{
Destination: "/sys",
- Type: "bind",
+ Type: TypeBind,
Source: "/sys",
Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
}
@@ -126,7 +154,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/dev/mqueue")
devMqueue := spec.Mount{
Destination: "/dev/mqueue",
- Type: "bind",
+ Type: TypeBind,
Source: "/dev/mqueue",
Options: []string{"bind", "nosuid", "noexec", "nodev"},
}
@@ -136,7 +164,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
- Type: "bind",
+ Type: TypeBind,
Source: "/proc",
Options: []string{"rbind", "nosuid", "noexec", "nodev"},
}
@@ -377,6 +405,12 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
configSpec.Mounts = supercedeUserMounts(volumeMounts, configSpec.Mounts)
//--mount
configSpec.Mounts = supercedeUserMounts(config.initFSMounts(), configSpec.Mounts)
+
+ // Split normal mounts and named volumes
+ newMounts, namedVolumes := splitNamedVolumes(configSpec.Mounts)
+ configSpec.Mounts = newMounts
+ config.NamedVolumes = namedVolumes
+
// BLOCK IO
blkio, err := config.CreateBlockIO()
if err != nil {
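The heart of the spec.go change is the classification rule inside splitNamedVolumes: a bind mount whose source is an absolute or relative path stays a plain mount, while any other source is treated as a named volume (volume names cannot begin with a non-alphanumeric character, so the prefix check is sufficient). A reduced restatement of just that rule, under the same assumption:

package main

import (
	"fmt"
	"strings"
)

// isNamedVolume restates the splitNamedVolumes heuristic for a single
// bind-mount source string.
func isNamedVolume(source string) bool {
	return !strings.HasPrefix(source, "/") && !strings.HasPrefix(source, ".")
}

func main() {
	fmt.Println(isNamedVolume("/host/dir"))  // false: absolute host path
	fmt.Println(isNamedVolume("./relative")) // false: relative host path
	fmt.Println(isNamedVolume("myvol"))      // true: named volume
}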
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 63d500204..8cd13e251 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -103,6 +103,7 @@ func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, id string) error {
VirtualSize: newImage.VirtualSize,
Containers: int64(len(containers)),
Labels: labels,
+ TopLayer: newImage.TopLayer(),
}
return call.ReplyGetImage(il)
}
@@ -923,3 +924,40 @@ func (i *LibpodAPI) Diff(call iopodman.VarlinkCall, name string) error {
}
return call.ReplyDiff(response)
}
+
+// GetLayersMapWithImageInfo is a development-only endpoint that returns layer information for all images.
+func (i *LibpodAPI) GetLayersMapWithImageInfo(call iopodman.VarlinkCall) error {
+ layerInfo, err := image.GetLayersMapWithImageInfo(i.Runtime.ImageRuntime())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ b, err := json.Marshal(layerInfo)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyGetLayersMapWithImageInfo(string(b))
+}
+
+// BuildImageHierarchyMap builds the layer hierarchy for the named image and replies with it as a JSON string.
+func (i *LibpodAPI) BuildImageHierarchyMap(call iopodman.VarlinkCall, name string) error {
+ img, err := i.Runtime.ImageRuntime().NewFromLocal(name)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ imageInfo := &image.InfoImage{
+ ID: img.ID(),
+ Tags: img.Names(),
+ }
+ layerInfo, err := image.GetLayersMapWithImageInfo(i.Runtime.ImageRuntime())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if err := image.BuildImageHierarchyMap(imageInfo, layerInfo, img.TopLayer()); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ b, err := json.Marshal(imageInfo)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyBuildImageHierarchyMap(string(b))
+}
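Both new varlink methods reply with a plain JSON string rather than a structured varlink type: GetLayersMapWithImageInfo marshals the layer map for every image in the runtime, and BuildImageHierarchyMap marshals the InfoImage it fills in. A hedged client-side decoding sketch for the hierarchy reply follows; only the ID and Tags fields set in the handler above are modeled, and their JSON key spelling is an assumption since the image.InfoImage struct tags are not shown in this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// infoImage models only the fields populated by BuildImageHierarchyMap
// above; the real image.InfoImage type carries more.
type infoImage struct {
	ID   string   `json:"ID"`
	Tags []string `json:"Tags"`
}

func main() {
	// reply stands in for the string returned over varlink.
	reply := `{"ID":"deadbeef","Tags":["docker.io/library/alpine:latest"]}`

	var info infoImage
	if err := json.Unmarshal([]byte(reply), &info); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(info.ID, info.Tags)
}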
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index bf9c88de5..fe4ae64cf 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -144,7 +144,7 @@ var _ = Describe("Podman commit", func() {
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
image := inspect.InspectImageJSON()
- _, ok := image[0].Config.Volumes["/tmp"]
+ _, ok := image[0].Config.Volumes["/foo"]
Expect(ok).To(BeTrue())
r := podmanTest.Podman([]string{"run", "newimage"})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 5bcf3b347..2f0af7e5f 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -48,7 +48,6 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman generate kube on container", func() {
- SkipIfRootless()
session := podmanTest.RunTopContainer("top")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -62,7 +61,6 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman generate service kube on container", func() {
- SkipIfRootless()
session := podmanTest.RunTopContainer("top")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -76,7 +74,6 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman generate kube on pod", func() {
- SkipIfRootless()
_, rc, _ := podmanTest.CreatePod("toppod")
Expect(rc).To(Equal(0))
@@ -93,7 +90,6 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman generate service kube on pod", func() {
- SkipIfRootless()
_, rc, _ := podmanTest.CreatePod("toppod")
Expect(rc).To(Equal(0))
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index cd2365ce7..60be86ebc 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -42,7 +42,6 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman healthcheck on valid container", func() {
- SkipIfRootless()
podmanTest.RestoreArtifact(healthcheck)
session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", healthcheck})
session.WaitWithDefaultTimeout()
@@ -135,7 +134,6 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman healthcheck good check results in healthy even in start-period", func() {
- SkipIfRootless()
session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-start-period", "2m", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"||\" \"exit\" \"1\"", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -149,7 +147,6 @@ var _ = Describe("Podman healthcheck run", func() {
})
It("podman healthcheck single healthy result changes failed to healthy", func() {
- SkipIfRootless()
session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"/foo\" \"||\" \"exit\" \"1\"", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 2daf2fe5b..a89ee491b 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -611,7 +611,6 @@ USER mail`
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("data"))
-
})
It("podman run --volumes flag with multiple volumes", func() {
diff --git a/test/e2e/volume_rm_test.go b/test/e2e/volume_rm_test.go
index 888474670..39628d56f 100644
--- a/test/e2e/volume_rm_test.go
+++ b/test/e2e/volume_rm_test.go
@@ -32,7 +32,7 @@ var _ = Describe("Podman volume rm", func() {
})
- It("podman rm volume", func() {
+ It("podman volume rm", func() {
session := podmanTest.Podman([]string{"volume", "create", "myvol"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -47,7 +47,7 @@ var _ = Describe("Podman volume rm", func() {
Expect(len(session.OutputToStringArray())).To(Equal(0))
})
- It("podman rm with --force flag", func() {
+ It("podman volume rm with --force flag", func() {
SkipIfRemote()
session := podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"})
cid := session.OutputToString()
diff --git a/test/framework/framework.go b/test/framework/framework.go
new file mode 100644
index 000000000..52401faf8
--- /dev/null
+++ b/test/framework/framework.go
@@ -0,0 +1,56 @@
+package framework
+
+import (
+ "fmt"
+
+ "github.com/onsi/ginkgo"
+ "github.com/onsi/gomega"
+)
+
+// TestFramework is used to support commonly used test features
+type TestFramework struct {
+ setup func(*TestFramework) error
+ teardown func(*TestFramework) error
+ TestError error
+}
+
+// NewTestFramework creates a new test framework instance for a given `setup`
+// and `teardown` function
+func NewTestFramework(
+ setup func(*TestFramework) error,
+ teardown func(*TestFramework) error,
+) *TestFramework {
+ return &TestFramework{
+ setup,
+ teardown,
+ fmt.Errorf("error"),
+ }
+}
+
+// NilFunc is a convenience function which simply does nothing
+func NilFunc(f *TestFramework) error {
+ return nil
+}
+
+// Setup is the global initialization function which runs before each test
+// suite
+func (t *TestFramework) Setup() {
+ // Global initialization for the whole framework goes in here
+
+ // Setup the actual test suite
+ gomega.Expect(t.setup(t)).To(gomega.Succeed())
+}
+
+// Teardown is the global deinitialization function which runs after each test
+// suite
+func (t *TestFramework) Teardown() {
+ // Global deinitialization for the whole framework goes in here
+
+ // Teardown the actual test suite
+ gomega.Expect(t.teardown(t)).To(gomega.Succeed())
+}
+
+// Describe is a convenience wrapper around the `ginkgo.Describe` function
+func (t *TestFramework) Describe(text string, body func()) bool {
+ return ginkgo.Describe("libpod: "+text, body)
+}
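The registrar test file earlier in this diff shows how this framework is meant to be wired up: build one TestFramework per suite, hook Setup and Teardown into Ginkgo's suite lifecycle, and group specs with t.Describe so every block gets the "libpod: " prefix. A condensed sketch under those assumptions; the suite bootstrap itself (RunSpecs and the fail handler) is standard Ginkgo and does not appear in this diff.

package framework_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/containers/libpod/test/framework"
)

// t is the shared framework instance; NilFunc keeps setup and teardown empty.
var t = framework.NewTestFramework(framework.NilFunc, framework.NilFunc)

// TestFrameworkSuite is the standard Ginkgo entry point.
func TestFrameworkSuite(tt *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(tt, "framework")
}

var _ = BeforeSuite(t.Setup)
var _ = AfterSuite(t.Teardown)

var _ = t.Describe("Example", func() {
	It("runs inside a libpod-prefixed Describe block", func() {
		Expect(true).To(BeTrue())
	})
})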