Diffstat (limited to 'pkg')
-rw-r--r--  pkg/adapter/images.go            |  34
-rw-r--r--  pkg/adapter/images_remote.go     |  32
-rw-r--r--  pkg/adapter/runtime_remote.go    |   7
-rw-r--r--  pkg/registrar/registrar_test.go  | 318
-rw-r--r--  pkg/spec/createconfig.go         | 127
-rw-r--r--  pkg/spec/spec.go                 |  40
-rw-r--r--  pkg/varlinkapi/images.go         |  38
7 files changed, 429 insertions, 167 deletions
diff --git a/pkg/adapter/images.go b/pkg/adapter/images.go
new file mode 100644
index 000000000..c8ea1cdea
--- /dev/null
+++ b/pkg/adapter/images.go
@@ -0,0 +1,34 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/pkg/errors"
+)
+
+// Tree returns the image hierarchy info, the layer map, and the image itself for the given image name
+func (r *LocalRuntime) Tree(c *cliconfig.TreeValues) (*image.InfoImage, map[string]*image.LayerInfo, *ContainerImage, error) {
+ img, err := r.NewImageFromLocal(c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Fetch map of image-layers, which is used for printing output.
+ layerInfoMap, err := image.GetLayersMapWithImageInfo(r.Runtime.ImageRuntime())
+ if err != nil {
+ return nil, nil, nil, errors.Wrapf(err, "error while retrieving layers of image %q", img.InputName)
+ }
+
+ // Create an imageInfo struct and fill in the image's ID and tags
+ imageInfo := &image.InfoImage{
+ ID: img.ID(),
+ Tags: img.Names(),
+ }
+
+ if err := image.BuildImageHierarchyMap(imageInfo, layerInfoMap, img.TopLayer()); err != nil {
+ return nil, nil, nil, err
+ }
+ return imageInfo, layerInfoMap, img, nil
+}
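
Both backends expose Tree with the same signature, so command code compiles against either; the // +build !remoteclient tag above (and the remoteclient tag in images_remote.go below) decides which implementation is linked in. A minimal sketch of a build-agnostic caller, assuming the usual command wiring (runTree and its package are illustrative, not part of this change):

package treecmd

import (
	"fmt"

	"github.com/containers/libpod/cmd/podman/cliconfig"
	"github.com/containers/libpod/pkg/adapter"
)

// runTree is an illustrative caller; it works unchanged whether the local or
// the remote-client Tree implementation was compiled in.
func runTree(runtime *adapter.LocalRuntime, c *cliconfig.TreeValues) error {
	imageInfo, layerInfoMap, img, err := runtime.Tree(c)
	if err != nil {
		return err
	}
	// imageInfo holds the ID/tag hierarchy, layerInfoMap maps layer IDs to
	// their metadata, and img exposes TopLayer() as the root of the tree.
	fmt.Printf("image %s (top layer %s), %d layers known\n",
		imageInfo.ID, img.TopLayer(), len(layerInfoMap))
	return nil
}
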
diff --git a/pkg/adapter/images_remote.go b/pkg/adapter/images_remote.go
index e7b38dccc..722058d4a 100644
--- a/pkg/adapter/images_remote.go
+++ b/pkg/adapter/images_remote.go
@@ -6,8 +6,11 @@ import (
"context"
"encoding/json"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/inspect"
+ "github.com/pkg/errors"
)
// Inspect returns an ImageData struct for this image over a varlink connection
@@ -22,3 +25,32 @@ func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error
}
return &data, nil
}
+
+// Tree returns the image hierarchy info, the layer map, and the image for the remote client, fetched over a varlink connection
+func (r *LocalRuntime) Tree(c *cliconfig.TreeValues) (*image.InfoImage, map[string]*image.LayerInfo, *ContainerImage, error) {
+ layerInfoMap := make(map[string]*image.LayerInfo)
+ imageInfo := &image.InfoImage{}
+
+ img, err := r.NewImageFromLocal(c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ reply, err := iopodman.GetLayersMapWithImageInfo().Call(r.Conn)
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to obtain image layers")
+ }
+ if err := json.Unmarshal([]byte(reply), &layerInfoMap); err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to unmarshal image layers")
+ }
+
+ reply, err = iopodman.BuildImageHierarchyMap().Call(r.Conn, c.InputArgs[0])
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to get build image map")
+ }
+ if err := json.Unmarshal([]byte(reply), imageInfo); err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to unmarshal build image map")
+ }
+
+ return imageInfo, layerInfoMap, img, nil
+}
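
On the remote side both payloads arrive as JSON-encoded strings over varlink and are decoded back into the shared image types. A small sketch of that decode step in isolation; the reply literal is made up, and only the ID and Tags fields are taken from this change:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/libpod/libpod/image"
)

func main() {
	// Stand-in for the string returned by iopodman.BuildImageHierarchyMap();
	// the values are illustrative.
	reply := `{"ID":"1a2b3c4d5e6f","Tags":["docker.io/library/alpine:latest"]}`

	imageInfo := &image.InfoImage{}
	if err := json.Unmarshal([]byte(reply), imageInfo); err != nil {
		fmt.Println("failed to unmarshal build image map:", err)
		return
	}
	fmt.Println(imageInfo.ID, imageInfo.Tags)
}
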
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 978c9ffd8..807a9ad8f 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -82,6 +82,7 @@ type remoteImage struct {
Digest digest.Digest
isParent bool
Runtime *LocalRuntime
+ TopLayer string
}
// Container ...
@@ -147,6 +148,7 @@ func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRu
Names: i.RepoTags,
isParent: i.IsParent,
Runtime: runtime,
+ TopLayer: i.TopLayer,
}
return &ContainerImage{ri}, nil
}
@@ -280,6 +282,11 @@ func (ci *ContainerImage) Dangling() bool {
return len(ci.Names()) == 0
}
+// TopLayer returns an image's top layer as a string
+func (ci *ContainerImage) TopLayer() string {
+ return ci.remoteImage.TopLayer
+}
+
// TagImage ...
func (ci *ContainerImage) TagImage(tag string) error {
_, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
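
Plumbing TopLayer through the varlink Image record and adding the TopLayer() accessor gives the remote ContainerImage the same entry point into the layer tree that the local image type already has. An illustrative interface (not part of libpod) capturing the shared accessors that tree-rendering code relies on:

package treecmd

// treeImage is an illustrative interface, not libpod API: it names the
// accessors that both the local image type and the remote ContainerImage
// satisfy after this change.
type treeImage interface {
	ID() string
	Names() []string
	TopLayer() string
}

// rootLayer works against either backend through the shared accessors.
func rootLayer(img treeImage) string {
	return img.TopLayer()
}
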
diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go
index 0c1ef312a..50af95915 100644
--- a/pkg/registrar/registrar_test.go
+++ b/pkg/registrar/registrar_test.go
@@ -1,119 +1,213 @@
-package registrar
+package registrar_test
import (
- "reflect"
"testing"
-)
-
-func TestReserve(t *testing.T) {
- r := NewRegistrar()
-
- obj := "test1"
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-
- obj2 := "test2"
- err := r.Reserve("test", obj2)
- if err == nil {
- t.Fatalf("expected error when reserving an already reserved name to another object")
- }
- if err != ErrNameReserved {
- t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name")
- }
-}
-
-func TestRelease(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
-
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
- r.Release("test")
- r.Release("test") // Ensure there is no panic here
- if err := r.Reserve("test", obj); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestGetNames(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- names := []string{"test1", "test2"}
-
- for _, name := range names {
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
- }
- r.Reserve("test3", "other")
-
- names2, err := r.GetNames(obj)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(names, names2) {
- t.Fatalf("Exepected: %v, Got: %v", names, names2)
- }
-}
+ "github.com/containers/libpod/pkg/registrar"
+ . "github.com/containers/libpod/test/framework"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
-func TestDelete(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- names := []string{"test1", "test2"}
- for _, name := range names {
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
- }
-
- r.Reserve("test3", "other")
- r.Delete(obj)
-
- _, err := r.GetNames(obj)
- if err == nil {
- t.Fatal("expected error getting names for deleted key")
- }
-
- if err != ErrNoSuchKey {
- t.Fatal("expected `ErrNoSuchKey`")
- }
+// TestRegistrar runs the created specs
+func TestRegistrar(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Registrar")
}
-func TestGet(t *testing.T) {
- r := NewRegistrar()
- obj := "testing"
- name := "test"
-
- _, err := r.Get(name)
- if err == nil {
- t.Fatal("expected error when key does not exist")
- }
- if err != ErrNameNotReserved {
- t.Fatal(err)
- }
-
- if err := r.Reserve(name, obj); err != nil {
- t.Fatal(err)
- }
-
- if _, err = r.Get(name); err != nil {
- t.Fatal(err)
- }
-
- r.Delete(obj)
- _, err = r.Get(name)
- if err == nil {
- t.Fatal("expected error when key does not exist")
- }
- if err != ErrNameNotReserved {
- t.Fatal(err)
- }
-}
+// nolint: gochecknoglobals
+var t *TestFramework
+
+var _ = BeforeSuite(func() {
+ t = NewTestFramework(NilFunc, NilFunc)
+ t.Setup()
+})
+
+var _ = AfterSuite(func() {
+ t.Teardown()
+})
+
+// The actual test suite
+var _ = t.Describe("Registrar", func() {
+ // Constant test data needed by some tests
+ const (
+ testKey = "testKey"
+ testName = "testName"
+ anotherKey = "anotherKey"
+ )
+
+ // The system under test
+ var sut *registrar.Registrar
+
+ // Prepare the system under test and register a test name and key before
+ // each test
+ BeforeEach(func() {
+ sut = registrar.NewRegistrar()
+ Expect(sut.Reserve(testName, testKey)).To(BeNil())
+ })
+
+ t.Describe("Reserve", func() {
+ It("should succeed to reserve a new registrar", func() {
+ // Given
+ // When
+ err := sut.Reserve("name", "key")
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+
+ It("should succeed to reserve a registrar twice", func() {
+ // Given
+ // When
+ err := sut.Reserve(testName, testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+
+ It("should fail to reserve an already reserved registrar", func() {
+ // Given
+ // When
+ err := sut.Reserve(testName, anotherKey)
+
+ // Then
+ Expect(err).NotTo(BeNil())
+ Expect(err).To(Equal(registrar.ErrNameReserved))
+ })
+ })
+
+ t.Describe("Release", func() {
+ It("should succeed to release a registered registrar multiple times", func() {
+ // Given
+ // When
+ // Then
+ sut.Release(testName)
+ sut.Release(testName)
+ })
+
+ It("should succeed to release a unknown registrar multiple times", func() {
+ // Given
+ // When
+ // Then
+ sut.Release(anotherKey)
+ sut.Release(anotherKey)
+ })
+
+ It("should succeed to release and re-register a registrar", func() {
+ // Given
+ // When
+ sut.Release(testName)
+ err := sut.Reserve(testName, testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ })
+ })
+
+ t.Describe("GetNames", func() {
+ It("should succeed to retrieve a single name for a registrar", func() {
+ // Given
+ // When
+ names, err := sut.GetNames(testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(1))
+ Expect(names[0]).To(Equal(testName))
+ })
+
+ It("should succeed to retrieve all names for a registrar", func() {
+ // Given
+ testNames := []string{"test1", "test2"}
+ for _, name := range testNames {
+ Expect(sut.Reserve(name, anotherKey)).To(BeNil())
+ }
+
+ // When
+ names, err := sut.GetNames(anotherKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(2))
+ Expect(names).To(Equal(testNames))
+ })
+ })
+
+ t.Describe("GetNames", func() {
+ It("should succeed to retrieve a single name for a registrar", func() {
+ // Given
+ // When
+ names, err := sut.GetNames(testKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(1))
+ Expect(names[0]).To(Equal(testName))
+ })
+
+ It("should succeed to retrieve all names for a registrar", func() {
+ // Given
+ anotherKey := "anotherKey"
+ testNames := []string{"test1", "test2"}
+ for _, name := range testNames {
+ Expect(sut.Reserve(name, anotherKey)).To(BeNil())
+ }
+
+ // When
+ names, err := sut.GetNames(anotherKey)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(len(names)).To(Equal(2))
+ Expect(names).To(Equal(testNames))
+ })
+ })
+
+ t.Describe("Delete", func() {
+ It("should succeed to delete a registrar", func() {
+ // Given
+ // When
+ sut.Delete(testKey)
+
+ // Then
+ names, err := sut.GetNames(testKey)
+ Expect(len(names)).To(BeZero())
+ Expect(err).To(Equal(registrar.ErrNoSuchKey))
+ })
+ })
+
+ t.Describe("Get", func() {
+ It("should succeed to get a key for a registrar", func() {
+ // Given
+ // When
+ key, err := sut.Get(testName)
+
+ // Then
+ Expect(err).To(BeNil())
+ Expect(key).To(Equal(testKey))
+ })
+
+ It("should fail to get a key for a not existing registrar", func() {
+ // Given
+ // When
+ key, err := sut.Get("notExistingName")
+
+ // Then
+ Expect(key).To(BeEmpty())
+ Expect(err).To(Equal(registrar.ErrNameNotReserved))
+ })
+ })
+
+ t.Describe("GetAll", func() {
+ It("should succeed to get all names", func() {
+ // Given
+ // When
+ names := sut.GetAll()
+
+ // Then
+ Expect(len(names)).To(Equal(1))
+ Expect(len(names[testKey])).To(Equal(1))
+ Expect(names[testKey][0]).To(Equal(testName))
+ })
+ })
+})
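
The conversion from the stdlib-style tests to Ginkgo/Gomega specs is largely mechanical: every t.Fatal comparison becomes an Expect(...).To(...) assertion inside an It block. A condensed side-by-side of the reserve-conflict case, using only identifiers from the registrar package (the test names are illustrative):

package registrar_test

import (
	"testing"

	"github.com/containers/libpod/pkg/registrar"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Stdlib style, as in the removed tests: explicit error comparison.
func TestReserveConflictStdlib(t *testing.T) {
	r := registrar.NewRegistrar()
	if err := r.Reserve("name", "key"); err != nil {
		t.Fatal(err)
	}
	if err := r.Reserve("name", "otherKey"); err != registrar.ErrNameReserved {
		t.Fatal("expected ErrNameReserved")
	}
}

// Ginkgo/Gomega style, as in the added suite: the same check as a spec.
var _ = Describe("Reserve", func() {
	It("should fail to reserve an already reserved name", func() {
		r := registrar.NewRegistrar()
		Expect(r.Reserve("name", "key")).To(BeNil())
		Expect(r.Reserve("name", "otherKey")).To(Equal(registrar.ErrNameReserved))
	})
})
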
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index a433fc16d..e71d9d3db 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -1,7 +1,6 @@
package createconfig
import (
- "encoding/json"
"fmt"
"net"
"os"
@@ -23,18 +22,16 @@ import (
"golang.org/x/sys/unix"
)
-type mountType string
-
// Type constants
const (
bps = iota
iops
// TypeBind is the type for mounting host dir
- TypeBind mountType = "bind"
+ TypeBind = "bind"
// TypeVolume is the type for remote storage volumes
- // TypeVolume mountType = "volume" // re-enable upon use
+ // TypeVolume = "volume" // re-enable upon use
// TypeTmpfs is the type for mounting tmpfs
- TypeTmpfs mountType = "tmpfs"
+ TypeTmpfs = "tmpfs"
)
// CreateResourceConfig represents resource elements in CreateConfig
@@ -130,15 +127,15 @@ type CreateConfig struct {
Mounts []spec.Mount //mounts
Volumes []string //volume
VolumesFrom []string
- WorkDir string //workdir
- LabelOpts []string //SecurityOpts
- NoNewPrivs bool //SecurityOpts
- ApparmorProfile string //SecurityOpts
- SeccompProfilePath string //SecurityOpts
+ NamedVolumes []*libpod.ContainerNamedVolume // Filled in by CreateConfigToOCISpec
+ WorkDir string //workdir
+ LabelOpts []string //SecurityOpts
+ NoNewPrivs bool //SecurityOpts
+ ApparmorProfile string //SecurityOpts
+ SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
- LocalVolumes []spec.Mount //Keeps track of the built-in volumes of container used in the --volumes-from flag
- Syslog bool // Whether to enable syslog on exit commands
+ Syslog bool // Whether to enable syslog on exit commands
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -172,9 +169,9 @@ func (c *CreateConfig) AddContainerInitBinary(path string) error {
c.Command = append([]string{"/dev/init", "--"}, c.Command...)
c.Mounts = append(c.Mounts, spec.Mount{
Destination: "/dev/init",
- Type: "bind",
+ Type: TypeBind,
Source: path,
- Options: []string{"bind", "ro"},
+ Options: []string{TypeBind, "ro"},
})
return nil
}
@@ -217,9 +214,9 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
return mounts
}
-//GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
+// GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
- m := c.LocalVolumes
+ m := []spec.Mount{}
for _, i := range c.Volumes {
var options []string
spliti := strings.Split(i, ":")
@@ -255,9 +252,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
mount.Source = "tmpfs"
mount.Options = append(mount.Options, "tmpcopyup")
} else {
+ // TODO: Move support for this and tmpfs into libpod
+ // Should tmpfs also be handled as named volumes? Wouldn't be hard
// This will cause a new local Volume to be created on your system
mount.Source = stringid.GenerateNonCryptoID()
- mount.Options = append(mount.Options, "bind")
+ mount.Options = append(mount.Options, TypeBind)
}
m = append(m, mount)
}
@@ -268,13 +267,12 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
// GetVolumesFrom reads the create-config artifact of the container to get volumes from
// and adds it to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
- var options string
-
if os.Geteuid() != 0 {
return nil
}
for _, vol := range c.VolumesFrom {
+ options := ""
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
options = splitVol[1]
@@ -283,41 +281,60 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
- inspect, err := ctr.Inspect(false)
- if err != nil {
- return errors.Wrapf(err, "error inspecting %q", splitVol[0])
- }
- var createArtifact CreateConfig
- artifact, err := ctr.GetArtifact("create-config")
- if err != nil {
- return errors.Wrapf(err, "error getting create-config artifact for %q", splitVol[0])
+
+ logrus.Debugf("Adding volumes from container %s", ctr.ID())
+
+ // Look up the container's user volumes. This gets us the
+ // destinations of all mounts the user added to the container.
+ userVolumesArr := ctr.UserVolumes()
+
+ // We're going to need to access them a lot, so convert to a map
+ // to reduce looping.
+ // We'll also use the map to indicate if we missed any volumes along the way.
+ userVolumes := make(map[string]bool)
+ for _, dest := range userVolumesArr {
+ userVolumes[dest] = false
}
- if err := json.Unmarshal(artifact, &createArtifact); err != nil {
- return err
+
+ // Now we get the container's spec and loop through its volumes
+ // and append them in if we can find them.
+ spec := ctr.Spec()
+ if spec == nil {
+ return errors.Errorf("error retrieving container %s spec", ctr.ID())
}
- for key := range createArtifact.BuiltinImgVolumes {
- for _, m := range inspect.Mounts {
- if m.Destination == key {
- c.LocalVolumes = append(c.LocalVolumes, m)
- break
+ for _, mnt := range spec.Mounts {
+ if mnt.Type != TypeBind {
+ continue
+ }
+ if _, exists := userVolumes[mnt.Destination]; exists {
+ userVolumes[mnt.Destination] = true
+ localOptions := options
+ if localOptions == "" {
+ localOptions = strings.Join(mnt.Options, ",")
}
+ c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", mnt.Source, mnt.Destination, localOptions))
}
}
- for _, i := range createArtifact.Volumes {
- // Volumes format is host-dir:ctr-dir[:options], so get the host and ctr dir
- // and add on the options given by the user to the flag.
- spliti := strings.SplitN(i, ":", 3)
- // Throw error if mounting volume from container with Z option (private label)
- // Override this by adding 'z' to options.
- if len(spliti) > 2 && strings.Contains(spliti[2], "Z") && !strings.Contains(options, "z") {
- return errors.Errorf("volume mounted with private option 'Z' in %q. Use option 'z' to mount in current container", ctr.ID())
+ // We're done with the spec mounts. Add named volumes.
+ // Add these unconditionally - none of them are automatically
+ // part of the container, as some spec mounts are.
+ namedVolumes := ctr.NamedVolumes()
+ for _, namedVol := range namedVolumes {
+ if _, exists := userVolumes[namedVol.Dest]; exists {
+ userVolumes[namedVol.Dest] = true
}
- if options == "" {
- // Mount the volumes with the default options
- c.Volumes = append(c.Volumes, createArtifact.Volumes...)
- } else {
- c.Volumes = append(c.Volumes, spliti[0]+":"+spliti[1]+":"+options)
+ localOptions := options
+ if localOptions == "" {
+ localOptions = strings.Join(namedVol.Options, ",")
+ }
+ c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, localOptions))
+ }
+
+ // Check if we missed any volumes
+ for volDest, found := range userVolumes {
+ if !found {
+ logrus.Warnf("Unable to match volume %s from container %s for volumes-from", volDest, ctr.ID())
}
}
}
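
The rewritten GetVolumesFrom no longer parses the create-config artifact; it collects the source container's user-volume destinations into a destination-to-matched map, then walks the spec mounts and named volumes, recording anything it could not match so it can warn. A stripped-down sketch of that matching pattern; the mount struct and matchVolumes helper are simplified stand-ins, not libpod API:

package main

import "fmt"

// mount is a simplified stand-in for spec.Mount.
type mount struct {
	Source, Destination, Options string
}

// matchVolumes mirrors the destination-matching pattern used above.
func matchVolumes(userDests []string, mounts []mount) (matched, missed []string) {
	// Destination -> "seen" flag, as in the rewritten GetVolumesFrom.
	seen := make(map[string]bool)
	for _, dest := range userDests {
		seen[dest] = false
	}
	for _, m := range mounts {
		if _, ok := seen[m.Destination]; ok {
			seen[m.Destination] = true
			matched = append(matched, fmt.Sprintf("%s:%s:%s", m.Source, m.Destination, m.Options))
		}
	}
	for dest, found := range seen {
		if !found {
			missed = append(missed, dest)
		}
	}
	return matched, missed
}

func main() {
	got, missed := matchVolumes(
		[]string{"/data", "/cache"},
		[]mount{{Source: "/host/data", Destination: "/data", Options: "rw"}},
	)
	fmt.Println(got)    // [/host/data:/data:rw]
	fmt.Println(missed) // [/cache]
}
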
@@ -417,14 +434,20 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
// others, if they are included
volumes := make([]string, 0, len(c.Volumes))
for _, vol := range c.Volumes {
- volumes = append(volumes, strings.SplitN(vol, ":", 2)[0])
+ // We always want the volume destination
+ splitVol := strings.SplitN(vol, ":", 3)
+ if len(splitVol) > 1 {
+ volumes = append(volumes, splitVol[1])
+ } else {
+ volumes = append(volumes, splitVol[0])
+ }
}
options = append(options, libpod.WithUserVolumes(volumes))
}
- if len(c.LocalVolumes) != 0 {
- options = append(options, libpod.WithLocalVolumes(c.LocalVolumes))
+ if len(c.NamedVolumes) != 0 {
+ options = append(options, libpod.WithNamedVolumes(c.NamedVolumes))
}
if len(c.Command) != 0 {
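
WithUserVolumes now receives volume destinations rather than the first field of each --volume argument, because after this change the first field can be a named-volume name as well as a host path. A quick worked example of the SplitN logic on illustrative values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative --volume values; the destination is always the part we want.
	for _, vol := range []string{
		"mydata:/var/lib/data:ro", // named volume
		"/host/dir:/ctr/dir",      // bind mount
		"/anon",                   // destination only: an anonymous volume
	} {
		splitVol := strings.SplitN(vol, ":", 3)
		dest := splitVol[0]
		if len(splitVol) > 1 {
			dest = splitVol[1]
		}
		fmt.Println(dest) // prints /var/lib/data, /ctr/dir, /anon (one per line)
	}
}
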
@@ -538,7 +561,7 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
options = append(options, libpod.WithPrivileged(c.Privileged))
- useImageVolumes := c.ImageVolumeType == "bind"
+ useImageVolumes := c.ImageVolumeType == TypeBind
// Gather up the options for NewContainer which consist of With... funcs
options = append(options, libpod.WithRootFSFromImage(c.ImageID, c.Image, useImageVolumes))
options = append(options, libpod.WithSecLabels(c.LabelOpts))
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index a61741f73..9b6bd089e 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/mount"
pmount "github.com/containers/storage/pkg/mount"
@@ -48,6 +49,33 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
return configMount
}
+// splitNamedVolumes separates named volumes from normal (path-based) mounts
+func splitNamedVolumes(mounts []spec.Mount) ([]spec.Mount, []*libpod.ContainerNamedVolume) {
+ newMounts := make([]spec.Mount, 0)
+ namedVolumes := make([]*libpod.ContainerNamedVolume, 0)
+ for _, mount := range mounts {
+ // Only bind-type mounts can be named volumes; append anything else unconditionally
+ if mount.Type != TypeBind {
+ newMounts = append(newMounts, mount)
+ continue
+ }
+ // Volumes that are not named volumes must be an absolute or
+ // relative path.
+ // Volume names may not begin with a non-alphanumeric character
+ // so the HasPrefix() check is safe here.
+ if strings.HasPrefix(mount.Source, "/") || strings.HasPrefix(mount.Source, ".") {
+ newMounts = append(newMounts, mount)
+ } else {
+ namedVolume := new(libpod.ContainerNamedVolume)
+ namedVolume.Name = mount.Source
+ namedVolume.Dest = mount.Destination
+ namedVolume.Options = mount.Options
+ namedVolumes = append(namedVolumes, namedVolume)
+ }
+ }
+ return newMounts, namedVolumes
+}
+
func getAvailableGids() (int64, error) {
idMap, err := user.ParseIDMapFile("/proc/self/gid_map")
if err != nil {
@@ -99,7 +127,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
}
sysMnt := spec.Mount{
Destination: "/sys",
- Type: "bind",
+ Type: TypeBind,
Source: "/sys",
Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
}
@@ -126,7 +154,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/dev/mqueue")
devMqueue := spec.Mount{
Destination: "/dev/mqueue",
- Type: "bind",
+ Type: TypeBind,
Source: "/dev/mqueue",
Options: []string{"bind", "nosuid", "noexec", "nodev"},
}
@@ -136,7 +164,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
- Type: "bind",
+ Type: TypeBind,
Source: "/proc",
Options: []string{"rbind", "nosuid", "noexec", "nodev"},
}
@@ -377,6 +405,12 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
configSpec.Mounts = supercedeUserMounts(volumeMounts, configSpec.Mounts)
//--mount
configSpec.Mounts = supercedeUserMounts(config.initFSMounts(), configSpec.Mounts)
+
+ // Split normal mounts and named volumes
+ newMounts, namedVolumes := splitNamedVolumes(configSpec.Mounts)
+ configSpec.Mounts = newMounts
+ config.NamedVolumes = namedVolumes
+
// BLOCK IO
blkio, err := config.CreateBlockIO()
if err != nil {
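
splitNamedVolumes decides, per bind-type mount, whether the source is a host path (absolute or relative) or a volume name, and routes the latter onto config.NamedVolumes so that CreateConfigToOCISpec leaves only real OCI mounts in the spec. A worked example of that heuristic; isNamedVolumeSource is a condensed restatement for illustration, not libpod API:

package main

import (
	"fmt"
	"strings"
)

// isNamedVolumeSource restates the heuristic from splitNamedVolumes above:
// bind-type mounts whose source is not an absolute or relative path are
// treated as named volumes.
func isNamedVolumeSource(mountType, source string) bool {
	if mountType != "bind" {
		return false
	}
	return !strings.HasPrefix(source, "/") && !strings.HasPrefix(source, ".")
}

func main() {
	fmt.Println(isNamedVolumeSource("bind", "/host/dir"))  // false: host bind mount
	fmt.Println(isNamedVolumeSource("bind", "./rel/dir"))  // false: relative host path
	fmt.Println(isNamedVolumeSource("bind", "mysql-data")) // true: named volume
	fmt.Println(isNamedVolumeSource("tmpfs", "tmpfs"))     // false: not a bind mount
}
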
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 63d500204..8cd13e251 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -103,6 +103,7 @@ func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, id string) error {
VirtualSize: newImage.VirtualSize,
Containers: int64(len(containers)),
Labels: labels,
+ TopLayer: newImage.TopLayer(),
}
return call.ReplyGetImage(il)
}
@@ -923,3 +924,40 @@ func (i *LibpodAPI) Diff(call iopodman.VarlinkCall, name string) error {
}
return call.ReplyDiff(response)
}
+
+// GetLayersMapWithImageInfo is a development-only endpoint that returns layer information for all local images as JSON.
+func (i *LibpodAPI) GetLayersMapWithImageInfo(call iopodman.VarlinkCall) error {
+ layerInfo, err := image.GetLayersMapWithImageInfo(i.Runtime.ImageRuntime())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ b, err := json.Marshal(layerInfo)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyGetLayersMapWithImageInfo(string(b))
+}
+
+// BuildImageHierarchyMap builds the layer hierarchy map for the named image and returns it as JSON.
+func (i *LibpodAPI) BuildImageHierarchyMap(call iopodman.VarlinkCall, name string) error {
+ img, err := i.Runtime.ImageRuntime().NewFromLocal(name)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ imageInfo := &image.InfoImage{
+ ID: img.ID(),
+ Tags: img.Names(),
+ }
+ layerInfo, err := image.GetLayersMapWithImageInfo(i.Runtime.ImageRuntime())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if err := image.BuildImageHierarchyMap(imageInfo, layerInfo, img.TopLayer()); err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ b, err := json.Marshal(imageInfo)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyBuildImageHierarchyMap(string(b))
+}
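
Both new endpoints reply with a JSON string: GetLayersMapWithImageInfo encodes the map[string]*image.LayerInfo, and BuildImageHierarchyMap encodes an image.InfoImage, which the remote adapter (pkg/adapter/images_remote.go above) decodes back into the same types. A sketch of the server-side encode step in isolation; the ID and tag values are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/libpod/libpod/image"
)

func main() {
	// Illustrative reply construction mirroring BuildImageHierarchyMap above.
	imageInfo := &image.InfoImage{
		ID:   "1a2b3c4d5e6f",
		Tags: []string{"docker.io/library/alpine:latest"},
	}
	b, err := json.Marshal(imageInfo)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	// This string is what the varlink reply carries and what the remote
	// client decodes back into an image.InfoImage.
	fmt.Println(string(b))
}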