Diffstat (limited to 'pkg')
-rw-r--r--  pkg/api/handlers/compat/containers_prune.go | 35
-rw-r--r--  pkg/api/handlers/libpod/containers_create.go | 3
-rw-r--r--  pkg/api/handlers/libpod/images.go | 2
-rw-r--r--  pkg/api/handlers/libpod/pods.go | 14
-rw-r--r--  pkg/api/handlers/libpod/system.go | 71
-rw-r--r--  pkg/api/handlers/libpod/volumes.go | 15
-rw-r--r--  pkg/api/server/register_system.go | 17
-rw-r--r--  pkg/bindings/system/system.go | 24
-rw-r--r--  pkg/bindings/test/common_test.go | 4
-rw-r--r--  pkg/bindings/test/containers_test.go | 4
-rw-r--r--  pkg/bindings/test/create_test.go | 2
-rw-r--r--  pkg/bindings/test/info_test.go | 2
-rw-r--r--  pkg/bindings/test/system_test.go | 106
-rw-r--r--  pkg/domain/entities/container_ps.go | 30
-rw-r--r--  pkg/domain/entities/images.go | 2
-rw-r--r--  pkg/domain/entities/system.go | 14
-rw-r--r--  pkg/domain/infra/abi/containers.go | 21
-rw-r--r--  pkg/domain/infra/abi/images.go | 20
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 4
-rw-r--r--  pkg/ps/ps.go | 1
-rw-r--r--  pkg/specgen/container_validate.go | 2
-rw-r--r--  pkg/specgen/generate/container.go | 40
-rw-r--r--  pkg/specgen/generate/container_create.go | 47
-rw-r--r--  pkg/specgen/generate/oci.go | 59
-rw-r--r--  pkg/specgen/generate/storage.go | 303
-rw-r--r--  pkg/specgen/namespaces.go | 4
-rw-r--r--  pkg/specgen/specgen.go | 19
27 files changed, 764 insertions, 101 deletions
diff --git a/pkg/api/handlers/compat/containers_prune.go b/pkg/api/handlers/compat/containers_prune.go
index b4e98ac1f..9d77f612b 100644
--- a/pkg/api/handlers/compat/containers_prune.go
+++ b/pkg/api/handlers/compat/containers_prune.go
@@ -38,21 +38,24 @@ func PruneContainers(w http.ResponseWriter, r *http.Request) {
filterFuncs = append(filterFuncs, generatedFunc)
}
}
- prunedContainers, pruneErrors, err := runtime.PruneContainers(filterFuncs)
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
// Libpod response differs
if utils.IsLibpodRequest(r) {
- report := &entities.ContainerPruneReport{
- Err: pruneErrors,
- ID: prunedContainers,
+ report, err := PruneContainersHelper(w, r, filterFuncs)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
}
+
utils.WriteResponse(w, http.StatusOK, report)
return
}
+
+ prunedContainers, pruneErrors, err := runtime.PruneContainers(filterFuncs)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
for ctrID, size := range prunedContainers {
if pruneErrors[ctrID] == nil {
space += size
@@ -65,3 +68,19 @@ func PruneContainers(w http.ResponseWriter, r *http.Request) {
}
utils.WriteResponse(w, http.StatusOK, report)
}
+
+func PruneContainersHelper(w http.ResponseWriter, r *http.Request, filterFuncs []libpod.ContainerFilter) (
+ *entities.ContainerPruneReport, error) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ prunedContainers, pruneErrors, err := runtime.PruneContainers(filterFuncs)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return nil, err
+ }
+
+ report := &entities.ContainerPruneReport{
+ Err: pruneErrors,
+ ID: prunedContainers,
+ }
+ return report, nil
+}
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index f64132d55..40b6cacdb 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "context"
"encoding/json"
"net/http"
@@ -26,7 +27,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- ctr, err := generate.MakeContainer(runtime, &sg)
+ ctr, err := generate.MakeContainer(context.Background(), runtime, &sg)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 760ab1b7c..f7be5ce9a 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -283,7 +283,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
return
}
}
- utils.WriteResponse(w, http.StatusOK, entities.ImageLoadReport{Name: loadedImage})
+ utils.WriteResponse(w, http.StatusOK, entities.ImageLoadReport{Names: split})
}
func ImagesImport(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index 0b15ab0d6..c3f8d5d66 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -231,14 +231,22 @@ func PodRestart(w http.ResponseWriter, r *http.Request) {
}
func PodPrune(w http.ResponseWriter, r *http.Request) {
+ reports, err := PodPruneHelper(w, r)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, reports)
+}
+
+func PodPruneHelper(w http.ResponseWriter, r *http.Request) ([]*entities.PodPruneReport, error) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
reports []*entities.PodPruneReport
)
responses, err := runtime.PrunePods(r.Context())
if err != nil {
- utils.InternalServerError(w, err)
- return
+ return nil, err
}
for k, v := range responses {
reports = append(reports, &entities.PodPruneReport{
@@ -246,7 +254,7 @@ func PodPrune(w http.ResponseWriter, r *http.Request) {
Id: k,
})
}
- utils.WriteResponse(w, http.StatusOK, reports)
+ return reports, nil
}
func PodPause(w http.ResponseWriter, r *http.Request) {
diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go
new file mode 100644
index 000000000..98e33bf10
--- /dev/null
+++ b/pkg/api/handlers/libpod/system.go
@@ -0,0 +1,71 @@
+package libpod
+
+import (
+ "net/http"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/compat"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/gorilla/schema"
+ "github.com/pkg/errors"
+)
+
+// SystemPrune removes unused data
+func SystemPrune(w http.ResponseWriter, r *http.Request) {
+ var (
+ decoder = r.Context().Value("decoder").(*schema.Decoder)
+ runtime = r.Context().Value("runtime").(*libpod.Runtime)
+ systemPruneReport = new(entities.SystemPruneReport)
+ )
+ query := struct {
+ All bool `schema:"all"`
+ Volumes bool `schema:"volumes"`
+ }{}
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ podPruneReport, err := PodPruneHelper(w, r)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ systemPruneReport.PodPruneReport = podPruneReport
+
+ // We could parallelize this, should we?
+ containerPruneReport, err := compat.PruneContainersHelper(w, r, nil)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ systemPruneReport.ContainerPruneReport = containerPruneReport
+
+ results, err := runtime.ImageRuntime().PruneImages(r.Context(), query.All, nil)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+
+ report := entities.ImagePruneReport{
+ Report: entities.Report{
+ Id: results,
+ Err: nil,
+ },
+ }
+
+ systemPruneReport.ImagePruneReport = &report
+
+ if query.Volumes {
+ volumePruneReport, err := pruneVolumesHelper(w, r)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ systemPruneReport.VolumePruneReport = volumePruneReport
+ }
+ utils.WriteResponse(w, http.StatusOK, systemPruneReport)
+}
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index 18c561a0d..c42ca407b 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -147,14 +147,22 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
}
func PruneVolumes(w http.ResponseWriter, r *http.Request) {
+ reports, err := pruneVolumesHelper(w, r)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, reports)
+}
+
+func pruneVolumesHelper(w http.ResponseWriter, r *http.Request) ([]*entities.VolumePruneReport, error) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
reports []*entities.VolumePruneReport
)
pruned, err := runtime.PruneVolumes(r.Context())
if err != nil {
- utils.InternalServerError(w, err)
- return
+ return nil, err
}
for k, v := range pruned {
reports = append(reports, &entities.VolumePruneReport{
@@ -162,9 +170,8 @@ func PruneVolumes(w http.ResponseWriter, r *http.Request) {
Id: k,
})
}
- utils.WriteResponse(w, http.StatusOK, reports)
+ return reports, nil
}
-
func RemoveVolume(w http.ResponseWriter, r *http.Request) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
diff --git a/pkg/api/server/register_system.go b/pkg/api/server/register_system.go
index 708ccd39b..7375a75c1 100644
--- a/pkg/api/server/register_system.go
+++ b/pkg/api/server/register_system.go
@@ -4,6 +4,7 @@ import (
"net/http"
"github.com/containers/libpod/pkg/api/handlers/compat"
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
"github.com/gorilla/mux"
)
@@ -11,5 +12,21 @@ func (s *APIServer) registerSystemHandlers(r *mux.Router) error {
r.Handle(VersionedPath("/system/df"), s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet)
// Added non version path to URI to support docker non versioned paths
r.Handle("/system/df", s.APIHandler(compat.GetDiskUsage)).Methods(http.MethodGet)
+ // Swagger:operation POST /libpod/system/prune libpod pruneSystem
+ // ---
+ // tags:
+ // - system
+ // summary: Prune unused data
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: '#/responses/SystemPruneReport'
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.Handle(VersionedPath("/libpod/system/prune"), s.APIHandler(libpod.SystemPrune)).Methods(http.MethodPost)
+
return nil
}
diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go
index e2f264139..df6b529de 100644
--- a/pkg/bindings/system/system.go
+++ b/pkg/bindings/system/system.go
@@ -6,6 +6,7 @@ import (
"io"
"net/http"
"net/url"
+ "strconv"
"github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/domain/entities"
@@ -59,3 +60,26 @@ func Events(ctx context.Context, eventChan chan (entities.Event), cancelChan cha
}
return nil
}
+
+// Prune removes all unused system data.
+func Prune(ctx context.Context, all, volumes *bool) (*entities.SystemPruneReport, error) {
+ var (
+ report entities.SystemPruneReport
+ )
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ if all != nil {
+ params.Set("All", strconv.FormatBool(*all))
+ }
+ if volumes != nil {
+ params.Set("Volumes", strconv.FormatBool(*volumes))
+ }
+ response, err := conn.DoRequest(nil, http.MethodPost, "/system/prune", params)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
+}
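
To show how the new system.Prune binding might be consumed, here is a minimal client sketch. The socket URI is an assumption (it varies per setup), bindings.NewConnection is called the same way the binding test harness calls it, and bindings.PTrue/bindings.PFalse are the boolean helpers the tests in this change already reference.

package main

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/system"
)

func main() {
	// Connect to a running Podman API service. The URI is an assumption;
	// rootless setups typically use a socket under $XDG_RUNTIME_DIR.
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	// all=true, volumes=false: prune containers, pods, and images,
	// but leave unused volumes alone.
	report, err := system.Prune(conn, &bindings.PTrue, &bindings.PFalse)
	if err != nil {
		panic(err)
	}

	fmt.Println("pods pruned:", len(report.PodPruneReport))
	fmt.Println("containers pruned:", len(report.ContainerPruneReport.ID))
	fmt.Println("images pruned:", len(report.ImagePruneReport.Report.Id))
	fmt.Println("volumes pruned:", len(report.VolumePruneReport))
}
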
diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go
index 6b8d6788c..f33e42440 100644
--- a/pkg/bindings/test/common_test.go
+++ b/pkg/bindings/test/common_test.go
@@ -3,13 +3,13 @@ package test_bindings
import (
"context"
"fmt"
- "github.com/containers/libpod/libpod/define"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
+ "github.com/containers/libpod/libpod/define"
. "github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/bindings/containers"
"github.com/containers/libpod/pkg/specgen"
@@ -189,7 +189,7 @@ func (b *bindingTest) restoreImageFromCache(i testImage) {
// Run a container within or without a pod
// and add or append the alpine image to it
func (b *bindingTest) RunTopContainer(containerName *string, insidePod *bool, podName *string) (string, error) {
- s := specgen.NewSpecGenerator(alpine.name)
+ s := specgen.NewSpecGenerator(alpine.name, false)
s.Terminal = false
s.Command = []string{"top"}
if containerName != nil {
diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go
index e288dc368..c79d89b73 100644
--- a/pkg/bindings/test/containers_test.go
+++ b/pkg/bindings/test/containers_test.go
@@ -360,7 +360,7 @@ var _ = Describe("Podman containers ", func() {
It("logging", func() {
stdoutChan := make(chan string, 10)
- s := specgen.NewSpecGenerator(alpine.name)
+ s := specgen.NewSpecGenerator(alpine.name, false)
s.Terminal = true
s.Command = []string{"date", "-R"}
r, err := containers.CreateWithSpec(bt.conn, s)
@@ -521,7 +521,7 @@ var _ = Describe("Podman containers ", func() {
})
It("container init", func() {
- s := specgen.NewSpecGenerator(alpine.name)
+ s := specgen.NewSpecGenerator(alpine.name, false)
ctr, err := containers.CreateWithSpec(bt.conn, s)
Expect(err).To(BeNil())
err = containers.ContainerInit(bt.conn, ctr.ID)
diff --git a/pkg/bindings/test/create_test.go b/pkg/bindings/test/create_test.go
index f83a9b14d..a63aa79cf 100644
--- a/pkg/bindings/test/create_test.go
+++ b/pkg/bindings/test/create_test.go
@@ -31,7 +31,7 @@ var _ = Describe("Create containers ", func() {
})
It("create a container running top", func() {
- s := specgen.NewSpecGenerator(alpine.name)
+ s := specgen.NewSpecGenerator(alpine.name, false)
s.Command = []string{"top"}
s.Terminal = true
s.Name = "top"
diff --git a/pkg/bindings/test/info_test.go b/pkg/bindings/test/info_test.go
index d0e651134..64f2b458f 100644
--- a/pkg/bindings/test/info_test.go
+++ b/pkg/bindings/test/info_test.go
@@ -45,7 +45,7 @@ var _ = Describe("Podman info", func() {
})
It("podman info container counts", func() {
- s := specgen.NewSpecGenerator(alpine.name)
+ s := specgen.NewSpecGenerator(alpine.name, false)
_, err := containers.CreateWithSpec(bt.conn, s)
Expect(err).To(BeNil())
diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go
index 3abc26b34..87e6d56dc 100644
--- a/pkg/bindings/test/system_test.go
+++ b/pkg/bindings/test/system_test.go
@@ -4,7 +4,12 @@ import (
"time"
"github.com/containers/libpod/pkg/api/handlers"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/bindings/pods"
"github.com/containers/libpod/pkg/bindings/system"
+ "github.com/containers/libpod/pkg/bindings/volumes"
+ "github.com/containers/libpod/pkg/domain/entities"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
@@ -12,13 +17,16 @@ import (
var _ = Describe("Podman system", func() {
var (
- bt *bindingTest
- s *gexec.Session
+ bt *bindingTest
+ s *gexec.Session
+ newpod string
)
BeforeEach(func() {
bt = newBindingTest()
bt.RestoreImagesFromCache()
+ newpod = "newpod"
+ bt.Podcreate(&newpod)
s = bt.startAPIService()
time.Sleep(1 * time.Second)
err := bt.NewConnection()
@@ -48,4 +56,98 @@ var _ = Describe("Podman system", func() {
cancelChan <- true
Expect(len(messages)).To(BeNumerically("==", 3))
})
+
+ It("podman system prune - pod,container stopped", func() {
+ // Start and stop a pod so it enters the exited state.
+ _, err := pods.Start(bt.conn, newpod)
+ Expect(err).To(BeNil())
+ _, err = pods.Stop(bt.conn, newpod, nil)
+ Expect(err).To(BeNil())
+ // Start and stop a container so it enters the exited state.
+ var name = "top"
+ _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ Expect(err).To(BeNil())
+ err = containers.Stop(bt.conn, name, nil)
+ Expect(err).To(BeNil())
+
+ systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse)
+ Expect(err).To(BeNil())
+ Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1))
+ Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
+ Expect(len(systemPruneResponse.ImagePruneReport.Report.Id)).
+ To(BeNumerically(">", 0))
+ Expect(systemPruneResponse.ImagePruneReport.Report.Id).
+ To(ContainElement("docker.io/library/alpine:latest"))
+ Expect(len(systemPruneResponse.VolumePruneReport)).To(Equal(0))
+ })
+
+ It("podman system prune running alpine container", func() {
+ // Start and stop a pod so it enters the exited state.
+ _, err := pods.Start(bt.conn, newpod)
+ Expect(err).To(BeNil())
+ _, err = pods.Stop(bt.conn, newpod, nil)
+ Expect(err).To(BeNil())
+
+ // Start and stop a container so it enters the exited state.
+ var name = "top"
+ _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ Expect(err).To(BeNil())
+ err = containers.Stop(bt.conn, name, nil)
+ Expect(err).To(BeNil())
+
+ // Start container and leave in running
+ var name2 = "top2"
+ _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil)
+ Expect(err).To(BeNil())
+
+ // Adding an unused volume
+ _, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{})
+ Expect(err).To(BeNil())
+
+ systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse)
+ Expect(err).To(BeNil())
+ Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1))
+ Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
+ Expect(len(systemPruneResponse.ImagePruneReport.Report.Id)).
+ To(BeNumerically(">", 0))
+ // The Alpine image should not be pruned, as it is in use by a running container
+ Expect(systemPruneResponse.ImagePruneReport.Report.Id).
+ ToNot(ContainElement("docker.io/library/alpine:latest"))
+ // Though an unused volume is available, it should not be pruned because the volumes flag is set to false.
+ Expect(len(systemPruneResponse.VolumePruneReport)).To(Equal(0))
+ })
+
+ It("podman system prune running alpine container volume prune", func() {
+ // Start a pod and leave it running
+ _, err := pods.Start(bt.conn, newpod)
+ Expect(err).To(BeNil())
+
+ // Start and stop a container so it enters the exited state.
+ var name = "top"
+ _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil)
+ Expect(err).To(BeNil())
+ err = containers.Stop(bt.conn, name, nil)
+ Expect(err).To(BeNil())
+
+ // Start second container and leave in running
+ var name2 = "top2"
+ _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil)
+ Expect(err).To(BeNil())
+
+ // Adding an unused volume should work
+ _, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{})
+ Expect(err).To(BeNil())
+
+ systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PTrue)
+ Expect(err).To(BeNil())
+ Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(0))
+ Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1))
+ Expect(len(systemPruneResponse.ImagePruneReport.Report.Id)).
+ To(BeNumerically(">", 0))
+ // The Alpine image should not be pruned, as it is in use by a running container
+ Expect(systemPruneResponse.ImagePruneReport.Report.Id).
+ ToNot(ContainElement("docker.io/library/alpine:latest"))
+ // The volume should be pruned now, as the volumes flag is set to true
+ Expect(len(systemPruneResponse.VolumePruneReport)).To(Equal(1))
+ })
})
diff --git a/pkg/domain/entities/container_ps.go b/pkg/domain/entities/container_ps.go
index 709bb58d6..fd94d93be 100644
--- a/pkg/domain/entities/container_ps.go
+++ b/pkg/domain/entities/container_ps.go
@@ -25,6 +25,8 @@ type ListContainer struct {
ID string `json:"Id"`
// Container image
Image string
+ // Container image ID
+ ImageID string
// If this container is a Pod infra container
IsInfra bool
// Labels for container
@@ -159,3 +161,31 @@ func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainer
}
return psOutput, nil
}
+
+func (l ListContainer) CGROUPNS() string {
+ return l.Namespaces.Cgroup
+}
+
+func (l ListContainer) IPC() string {
+ return l.Namespaces.IPC
+}
+
+func (l ListContainer) MNT() string {
+ return l.Namespaces.MNT
+}
+
+func (l ListContainer) NET() string {
+ return l.Namespaces.NET
+}
+
+func (l ListContainer) PIDNS() string {
+ return l.Namespaces.PIDNS
+}
+
+func (l ListContainer) USERNS() string {
+ return l.Namespaces.User
+}
+
+func (l ListContainer) UTS() string {
+ return l.Namespaces.UTS
+}
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 773cd90b4..460965b34 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -256,7 +256,7 @@ type ImageLoadOptions struct {
}
type ImageLoadReport struct {
- Name string
+ Names []string
}
type ImageImportOptions struct {
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index 3ddc04293..de93a382f 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -12,3 +12,17 @@ type ServiceOptions struct {
Timeout time.Duration // duration of inactivity the service should wait before shutting down
Command *cobra.Command // CLI command provided. Used in V1 code
}
+
+// SystemPruneOptions provides options to prune system.
+type SystemPruneOptions struct {
+ All bool
+ Volume bool
+}
+
+// SystemPruneReport provides report after system prune is executed.
+type SystemPruneReport struct {
+ PodPruneReport []*PodPruneReport
+ *ContainerPruneReport
+ *ImagePruneReport
+ VolumePruneReport []*VolumePruneReport
+}
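
Because SystemPruneReport embeds *ContainerPruneReport and *ImagePruneReport as pointers, their fields (ID, Report.Id) are promoted but the pointers can in principle be nil. A small, hypothetical consumer, with field types inferred from the handler and tests above:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// printSystemPruneReport is an illustrative helper: the container and image
// reports are embedded as pointers, so a defensive caller may want to
// nil-check before dereferencing them.
func printSystemPruneReport(r *entities.SystemPruneReport) {
	fmt.Println("pods pruned:", len(r.PodPruneReport))
	if r.ContainerPruneReport != nil {
		fmt.Println("containers pruned:", len(r.ContainerPruneReport.ID))
	}
	if r.ImagePruneReport != nil {
		fmt.Println("images pruned:", len(r.ImagePruneReport.Report.Id))
	}
	// VolumePruneReport is only populated when the caller asked to prune volumes.
	fmt.Println("volumes pruned:", len(r.VolumePruneReport))
}

func main() {
	// A hand-built, empty report just to exercise the helper.
	printSystemPruneReport(&entities.SystemPruneReport{})
}
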
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 9844d1d96..286d37c34 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -217,12 +217,23 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
}
func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
var (
+ ctrs []*libpod.Container
+ err error
reports []*entities.RestartReport
)
- ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
- if err != nil {
- return nil, err
+
+ if options.Running {
+ ctrs, err = ic.Libpod.GetRunningContainers()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ ctrs, err = getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
}
+
for _, con := range ctrs {
timeout := con.StopTimeout()
if options.Timeout != nil {
@@ -481,7 +492,7 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG
if err := generate.CompleteSpec(ctx, ic.Libpod, s); err != nil {
return nil, err
}
- ctr, err := generate.MakeContainer(ic.Libpod, s)
+ ctr, err := generate.MakeContainer(ctx, ic.Libpod, s)
if err != nil {
return nil, err
}
@@ -669,7 +680,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if err := generate.CompleteSpec(ctx, ic.Libpod, opts.Spec); err != nil {
return nil, err
}
- ctr, err := generate.MakeContainer(ic.Libpod, opts.Spec)
+ ctr, err := generate.MakeContainer(ctx, ic.Libpod, opts.Spec)
if err != nil {
return nil, err
}
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 64d9c9f12..7ac111745 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -46,7 +46,6 @@ func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOption
Id: results,
Err: nil,
},
- Size: 0,
}
return &report, nil
}
@@ -326,16 +325,19 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions)
if err != nil {
return nil, err
}
- newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(name)
- if err != nil {
- return nil, errors.Wrap(err, "image loaded but no additional tags were created")
- }
- if len(opts.Name) > 0 {
- if err := newImage.TagImage(fmt.Sprintf("%s:%s", opts.Name, opts.Tag)); err != nil {
- return nil, errors.Wrapf(err, "error adding %q to image %q", opts.Name, newImage.InputName)
+ names := strings.Split(name, ",")
+ if len(names) <= 1 {
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(name)
+ if err != nil {
+ return nil, errors.Wrap(err, "image loaded but no additional tags were created")
+ }
+ if len(opts.Name) > 0 {
+ if err := newImage.TagImage(fmt.Sprintf("%s:%s", opts.Name, opts.Tag)); err != nil {
+ return nil, errors.Wrapf(err, "error adding %q to image %q", opts.Name, newImage.InputName)
+ }
}
}
- return &entities.ImageLoadReport{Name: name}, nil
+ return &entities.ImageLoadReport{Names: names}, nil
}
func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) {
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 18d6613f4..32f9c4e36 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -115,11 +115,15 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
t := int(*options.Timeout)
timeout = &t
}
+
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
for _, c := range ctrs {
+ if options.Running && c.State != define.ContainerStateRunning.String() {
+ continue
+ }
reports = append(reports, &entities.RestartReport{
Id: c.ID,
Err: containers.Restart(ic.ClientCxt, c.ID, timeout),
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index d0fef65c8..907063df9 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -158,6 +158,7 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
ExitedAt: exitedTime.Unix(),
ID: conConfig.ID,
Image: conConfig.RootfsImageName,
+ ImageID: conConfig.RootfsImageID,
IsInfra: conConfig.IsInfra,
Labels: conConfig.Labels,
Mounts: ctr.UserVolumes(),
diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go
index 87fc59dfe..94e456c52 100644
--- a/pkg/specgen/container_validate.go
+++ b/pkg/specgen/container_validate.go
@@ -14,7 +14,7 @@ var (
// SystemDValues describes the only values that SystemD can be
SystemDValues = []string{"true", "false", "always"}
// ImageVolumeModeValues describes the only values that ImageVolumeMode can be
- ImageVolumeModeValues = []string{"ignore", "tmpfs", "bind"}
+ ImageVolumeModeValues = []string{"ignore", "tmpfs", "anonymous"}
)
func exclusiveOptions(opt1, opt2 string) error {
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index 669b1f05f..92a2b4d35 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -3,18 +3,16 @@ package generate
import (
"context"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/libpod/libpod"
ann "github.com/containers/libpod/pkg/annotations"
envLib "github.com/containers/libpod/pkg/env"
"github.com/containers/libpod/pkg/signal"
"github.com/containers/libpod/pkg/specgen"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) error {
- var appendEntryPoint bool
-
// If a rootfs is used, then there is no image data
if s.ContainerStorageConfig.Rootfs != "" {
return nil
@@ -25,7 +23,12 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
return err
}
- if s.HealthConfig == nil {
+ _, mediaType, err := newImage.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+
+ if s.HealthConfig == nil && mediaType == manifest.DockerV2Schema2MediaType {
s.HealthConfig, err = newImage.GetHealthCheck(ctx)
if err != nil {
return err
@@ -107,28 +110,6 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
}
s.Annotations = annotations
- // entrypoint
- entrypoint, err := newImage.Entrypoint(ctx)
- if err != nil {
- return err
- }
- if len(s.Entrypoint) < 1 && len(entrypoint) > 0 {
- appendEntryPoint = true
- s.Entrypoint = entrypoint
- }
- command, err := newImage.Cmd(ctx)
- if err != nil {
- return err
- }
- if len(s.Command) < 1 && len(command) > 0 {
- if appendEntryPoint {
- s.Command = entrypoint
- }
- s.Command = append(s.Command, command...)
- }
- if len(s.Command) < 1 && len(s.Entrypoint) < 1 {
- return errors.Errorf("No command provided or as CMD or ENTRYPOINT in this image")
- }
// workdir
workingDir, err := newImage.WorkingDir(ctx)
if err != nil {
@@ -151,13 +132,6 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
if err != nil {
return err
}
-
- // TODO This should be enabled when namespaces actually work
- //case usernsMode.IsKeepID():
- // user = fmt.Sprintf("%d:%d", rootless.GetRootlessUID(), rootless.GetRootlessGID())
- if len(s.User) == 0 {
- s.User = "0"
- }
}
if err := finishThrottleDevices(s); err != nil {
return err
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 49a717c5d..bb84f0618 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -15,7 +15,7 @@ import (
)
// MakeContainer creates a container based on the SpecGenerator
-func MakeContainer(rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Container, error) {
+func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Container, error) {
rtc, err := rt.GetConfig()
if err != nil {
return nil, err
@@ -75,16 +75,8 @@ func MakeContainer(rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Contai
s.CgroupNS = defaultNS
}
- options, err := createContainerOptions(rt, s, pod)
- if err != nil {
- return nil, err
- }
+ options := []libpod.CtrCreateOption{}
- podmanPath, err := os.Executable()
- if err != nil {
- return nil, err
- }
- options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
var newImage *image.Image
if s.Rootfs != "" {
options = append(options, libpod.WithRootFS(s.Rootfs))
@@ -99,14 +91,31 @@ func MakeContainer(rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Contai
return nil, errors.Wrap(err, "invalid config provided")
}
- runtimeSpec, err := SpecGenToOCI(s, rt, newImage)
+ finalMounts, finalVolumes, err := finalizeMounts(ctx, s, rt, rtc, newImage)
+ if err != nil {
+ return nil, err
+ }
+
+ opts, err := createContainerOptions(rt, s, pod, finalVolumes)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, opts...)
+
+ podmanPath, err := os.Executable()
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
+
+ runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts)
if err != nil {
return nil, err
}
- return rt.NewContainer(context.Background(), runtimeSpec, options...)
+ return rt.NewContainer(ctx, runtimeSpec, options...)
}
-func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) {
+func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *libpod.Pod, volumes []*specgen.NamedVolume) ([]libpod.CtrCreateOption, error) {
var options []libpod.CtrCreateOption
var err error
@@ -133,21 +142,21 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
for _, mount := range s.Mounts {
destinations = append(destinations, mount.Destination)
}
- for _, volume := range s.Volumes {
+ for _, volume := range volumes {
destinations = append(destinations, volume.Dest)
}
options = append(options, libpod.WithUserVolumes(destinations))
- if len(s.Volumes) != 0 {
- var volumes []*libpod.ContainerNamedVolume
- for _, v := range s.Volumes {
- volumes = append(volumes, &libpod.ContainerNamedVolume{
+ if len(volumes) != 0 {
+ var vols []*libpod.ContainerNamedVolume
+ for _, v := range volumes {
+ vols = append(vols, &libpod.ContainerNamedVolume{
Name: v.Name,
Dest: v.Dest,
Options: v.Options,
})
}
- options = append(options, libpod.WithNamedVolumes(volumes))
+ options = append(options, libpod.WithNamedVolumes(vols))
}
if len(s.Command) != 0 {
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index 8ca95016e..87262684e 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -1,8 +1,10 @@
package generate
import (
+ "context"
"strings"
+ "github.com/containers/common/pkg/config"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
@@ -10,6 +12,7 @@ import (
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
)
func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) error {
@@ -48,7 +51,51 @@ func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) error {
return nil
}
-func SpecGenToOCI(s *specgen.SpecGenerator, rt *libpod.Runtime, newImage *image.Image) (*spec.Spec, error) {
+// Produce the final command for the container.
+func makeCommand(ctx context.Context, s *specgen.SpecGenerator, img *image.Image, rtc *config.Config) ([]string, error) {
+ finalCommand := []string{}
+
+ entrypoint := s.Entrypoint
+ if len(entrypoint) == 0 && img != nil {
+ newEntry, err := img.Entrypoint(ctx)
+ if err != nil {
+ return nil, err
+ }
+ entrypoint = newEntry
+ }
+
+ finalCommand = append(finalCommand, entrypoint...)
+
+ command := s.Command
+ if len(command) == 0 && img != nil {
+ newCmd, err := img.Cmd(ctx)
+ if err != nil {
+ return nil, err
+ }
+ command = newCmd
+ }
+
+ finalCommand = append(finalCommand, command...)
+
+ if len(finalCommand) == 0 {
+ return nil, errors.Errorf("no command or entrypoint provided, and no CMD or ENTRYPOINT from image")
+ }
+
+ if s.Init {
+ initPath := s.InitPath
+ if initPath == "" && rtc != nil {
+ initPath = rtc.Engine.InitPath
+ }
+ if initPath == "" {
+ return nil, errors.Errorf("no path to init binary found but container requested an init")
+ }
+ finalCommand = append([]string{initPath, "--"}, finalCommand...)
+ }
+
+ return finalCommand, nil
+}
+
+func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, newImage *image.Image, mounts []spec.Mount) (*spec.Spec, error) {
var (
inUserNS bool
)
@@ -173,7 +220,13 @@ func SpecGenToOCI(s *specgen.SpecGenerator, rt *libpod.Runtime, newImage *image.
g.AddMount(cgroupMnt)
}
g.SetProcessCwd(s.WorkDir)
- g.SetProcessArgs(s.Command)
+
+ finalCmd, err := makeCommand(ctx, s, newImage, rtc)
+ if err != nil {
+ return nil, err
+ }
+ g.SetProcessArgs(finalCmd)
+
g.SetProcessTerminal(s.Terminal)
for key, val := range s.Annotations {
@@ -227,7 +280,7 @@ func SpecGenToOCI(s *specgen.SpecGenerator, rt *libpod.Runtime, newImage *image.
}
// BIND MOUNTS
- configSpec.Mounts = SupercedeUserMounts(s.Mounts, configSpec.Mounts)
+ configSpec.Mounts = SupercedeUserMounts(mounts, configSpec.Mounts)
// Process mounts to ensure correct options
if err := InitFSMounts(configSpec.Mounts); err != nil {
return nil, err
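
As a worked illustration of the resolution makeCommand performs above, the helper below mirrors its precedence with plain slices: a user-supplied entrypoint or command wins over the image's, the two halves are concatenated, and an init request prepends the init binary. The image metadata and init path used here are hypothetical.

package main

import "fmt"

// resolveArgs mirrors the precedence implemented by makeCommand: user
// entrypoint/command override the image's, the two are concatenated, and an
// optional init binary is prepended as "initPath --".
func resolveArgs(userEntrypoint, userCmd, imgEntrypoint, imgCmd []string, initPath string) []string {
	entrypoint := userEntrypoint
	if len(entrypoint) == 0 {
		entrypoint = imgEntrypoint
	}
	cmd := userCmd
	if len(cmd) == 0 {
		cmd = imgCmd
	}
	args := append(append([]string{}, entrypoint...), cmd...)
	if initPath != "" {
		args = append([]string{initPath, "--"}, args...)
	}
	return args
}

func main() {
	// Hypothetical image metadata: ENTRYPOINT ["nginx"], CMD ["-g", "daemon off;"].
	fmt.Println(resolveArgs(nil, nil, []string{"nginx"}, []string{"-g", "daemon off;"}, ""))
	// [nginx -g daemon off;]

	// The user overrides only the command; the image ENTRYPOINT is kept.
	fmt.Println(resolveArgs(nil, []string{"-v"}, []string{"nginx"}, []string{"-g", "daemon off;"}, ""))
	// [nginx -v]

	// Init prepends the init binary (path is hypothetical) in front of everything.
	fmt.Println(resolveArgs(nil, nil, []string{"top"}, nil, "/usr/libexec/podman/catatonit"))
	// [/usr/libexec/podman/catatonit -- top]
}
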
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 7650e4e9a..241c9adeb 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -1,12 +1,20 @@
package generate
import (
+ "context"
+ "fmt"
+ "os"
"path"
"path/filepath"
"strings"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/specgen"
"github.com/containers/libpod/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,6 +28,301 @@ const (
TypeTmpfs = "tmpfs"
)
+var (
+ errDuplicateDest = errors.Errorf("duplicate mount destination")
+)
+
+// Produce final mounts and named volumes for a container
+func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, img *image.Image) ([]spec.Mount, []*specgen.NamedVolume, error) {
+ // Get image volumes
+ baseMounts, baseVolumes, err := getImageVolumes(ctx, img, s)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get volumes-from mounts
+ volFromMounts, volFromVolumes, err := getVolumesFrom(s.VolumesFrom, rt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Supercede from --volumes-from.
+ for dest, mount := range volFromMounts {
+ baseMounts[dest] = mount
+ }
+ for dest, volume := range volFromVolumes {
+ baseVolumes[dest] = volume
+ }
+
+ // Need to make map forms of specgen mounts/volumes.
+ unifiedMounts := map[string]spec.Mount{}
+ unifiedVolumes := map[string]*specgen.NamedVolume{}
+ for _, m := range s.Mounts {
+ if _, ok := unifiedMounts[m.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, "conflict in specified mounts - multiple mounts at %q", m.Destination)
+ }
+ unifiedMounts[m.Destination] = m
+ }
+ for _, v := range s.Volumes {
+ if _, ok := unifiedVolumes[v.Dest]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, "conflict in specified volumes - multiple volumes at %q", v.Dest)
+ }
+ unifiedVolumes[v.Dest] = v
+ }
+
+ // If requested, add container init binary
+ if s.Init {
+ initPath := s.InitPath
+ if initPath == "" && rtc != nil {
+ initPath = rtc.Engine.InitPath
+ }
+ initMount, err := addContainerInitBinary(s, initPath)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, ok := unifiedMounts[initMount.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, "conflict with mount added by --init to %q", initMount.Destination)
+ }
+ unifiedMounts[initMount.Destination] = initMount
+ }
+
+ // Before superseding, we need to find volume mounts which conflict with
+ // named volumes, and vice versa.
+ // We'll delete the conflicts here as we supersede.
+ for dest := range unifiedMounts {
+ if _, ok := baseVolumes[dest]; ok {
+ delete(baseVolumes, dest)
+ }
+ }
+ for dest := range unifiedVolumes {
+ if _, ok := baseMounts[dest]; ok {
+ delete(baseMounts, dest)
+ }
+ }
+
+ // Supersede volumes-from/image volumes with unified volumes from above.
+ // This is an unconditional replacement.
+ for dest, mount := range unifiedMounts {
+ baseMounts[dest] = mount
+ }
+ for dest, volume := range unifiedVolumes {
+ baseVolumes[dest] = volume
+ }
+
+ // TODO: Investigate moving readonlyTmpfs into here. Would be more
+ // correct.
+
+ // Check for conflicts between named volumes and mounts
+ for dest := range baseMounts {
+ if _, ok := baseVolumes[dest]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ }
+ }
+ for dest := range baseVolumes {
+ if _, ok := baseMounts[dest]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ }
+ }
+ // Final step: maps to arrays
+ finalMounts := make([]spec.Mount, 0, len(baseMounts))
+ for _, mount := range baseMounts {
+ if mount.Type == TypeBind {
+ absSrc, err := filepath.Abs(mount.Source)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
+ }
+ mount.Source = absSrc
+ }
+ finalMounts = append(finalMounts, mount)
+ }
+ finalVolumes := make([]*specgen.NamedVolume, 0, len(baseVolumes))
+ for _, volume := range baseVolumes {
+ finalVolumes = append(finalVolumes, volume)
+ }
+
+ return finalMounts, finalVolumes, nil
+}
+
+// Get image volumes from the given image
+func getImageVolumes(ctx context.Context, img *image.Image, s *specgen.SpecGenerator) (map[string]spec.Mount, map[string]*specgen.NamedVolume, error) {
+ mounts := make(map[string]spec.Mount)
+ volumes := make(map[string]*specgen.NamedVolume)
+
+ mode := strings.ToLower(s.ImageVolumeMode)
+
+ // Image may be nil (rootfs in use), or image volume mode may be ignore.
+ if img == nil || mode == "ignore" {
+ return mounts, volumes, nil
+ }
+
+ inspect, err := img.InspectNoSize(ctx)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error inspecting image to get image volumes")
+ }
+ for volume := range inspect.Config.Volumes {
+ logrus.Debugf("Image has volume at %q", volume)
+ cleanDest := filepath.Clean(volume)
+ switch mode {
+ case "", "anonymous":
+ // Anonymous volumes have no name.
+ newVol := new(specgen.NamedVolume)
+ newVol.Dest = cleanDest
+ newVol.Options = []string{"rprivate", "rw", "nodev", "exec"}
+ volumes[cleanDest] = newVol
+ logrus.Debugf("Adding anonymous image volume at %q", cleanDest)
+ case "tmpfs":
+ mount := spec.Mount{
+ Destination: cleanDest,
+ Source: TypeTmpfs,
+ Type: TypeTmpfs,
+ Options: []string{"rprivate", "rw", "nodev", "exec"},
+ }
+ mounts[cleanDest] = mount
+ logrus.Debugf("Adding tmpfs image volume at %q", cleanDest)
+ }
+ }
+
+ return mounts, volumes, nil
+}
+
+func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]spec.Mount, map[string]*specgen.NamedVolume, error) {
+ finalMounts := make(map[string]spec.Mount)
+ finalNamedVolumes := make(map[string]*specgen.NamedVolume)
+
+ for _, volume := range volumesFrom {
+ var options []string
+
+ splitVol := strings.SplitN(volume, ":", 2)
+ if len(splitVol) == 2 {
+ splitOpts := strings.Split(splitVol[1], ",")
+ setRORW := false
+ setZ := false
+ for _, opt := range splitOpts {
+ switch opt {
+ case "z":
+ if setZ {
+ return nil, nil, errors.Errorf("cannot set :z more than once in mount options")
+ }
+ setZ = true
+ case "ro", "rw":
+ if setRORW {
+ return nil, nil, errors.Errorf("cannot set ro or rw options more than once")
+ }
+ setRORW = true
+ default:
+ return nil, nil, errors.Errorf("invalid option %q specified - volumes from another container can only use z,ro,rw options", opt)
+ }
+ }
+ options = splitOpts
+ }
+
+ ctr, err := runtime.LookupContainer(splitVol[0])
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error looking up container %q for volumes-from", splitVol[0])
+ }
+
+ logrus.Debugf("Adding volumes from container %s", ctr.ID())
+
+ // Look up the container's user volumes. This gets us the
+ // destinations of all mounts the user added to the container.
+ userVolumesArr := ctr.UserVolumes()
+
+ // We're going to need to access them a lot, so convert to a map
+ // to reduce looping.
+ // We'll also use the map to indicate if we missed any volumes along the way.
+ userVolumes := make(map[string]bool)
+ for _, dest := range userVolumesArr {
+ userVolumes[dest] = false
+ }
+
+ // Now we get the container's spec and loop through its volumes
+ // and append them in if we can find them.
+ spec := ctr.Spec()
+ if spec == nil {
+ return nil, nil, errors.Errorf("error retrieving container %s spec for volumes-from", ctr.ID())
+ }
+ for _, mnt := range spec.Mounts {
+ if mnt.Type != TypeBind {
+ continue
+ }
+ if _, exists := userVolumes[mnt.Destination]; exists {
+ userVolumes[mnt.Destination] = true
+
+ if len(options) != 0 {
+ mnt.Options = options
+ }
+
+ if _, ok := finalMounts[mnt.Destination]; ok {
+ logrus.Debugf("Overriding mount to %s with new mount from container %s", mnt.Destination, ctr.ID())
+ }
+ finalMounts[mnt.Destination] = mnt
+ }
+ }
+
+ // We're done with the spec mounts. Add named volumes.
+ // Add these unconditionally - none of them are automatically
+ // part of the container, as some spec mounts are.
+ namedVolumes := ctr.NamedVolumes()
+ for _, namedVol := range namedVolumes {
+ if _, exists := userVolumes[namedVol.Dest]; exists {
+ userVolumes[namedVol.Dest] = true
+ }
+
+ if len(options) != 0 {
+ namedVol.Options = options
+ }
+
+ if _, ok := finalMounts[namedVol.Dest]; ok {
+ logrus.Debugf("Overriding named volume mount to %s with new named volume from container %s", namedVol.Dest, ctr.ID())
+ }
+
+ newVol := new(specgen.NamedVolume)
+ newVol.Dest = namedVol.Dest
+ newVol.Options = namedVol.Options
+ newVol.Name = namedVol.Name
+
+ finalNamedVolumes[namedVol.Dest] = newVol
+ }
+
+ // Check if we missed any volumes
+ for volDest, found := range userVolumes {
+ if !found {
+ logrus.Warnf("Unable to match volume %s from container %s for volumes-from", volDest, ctr.ID())
+ }
+ }
+ }
+
+ return finalMounts, finalNamedVolumes, nil
+}
+
+// AddContainerInitBinary adds the init binary specified by path iff the
+// container will run in a private PID namespace that is not shared with the
+// host or another pre-existing container, where an init-like process is
+// already running.
+// This does *NOT* modify the container command - that must be done elsewhere.
+func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount, error) {
+ mount := spec.Mount{
+ Destination: "/dev/init",
+ Type: TypeBind,
+ Source: path,
+ Options: []string{TypeBind, "ro"},
+ }
+
+ if path == "" {
+ return mount, fmt.Errorf("please specify a path to the container-init binary")
+ }
+ if !s.PidNS.IsPrivate() {
+ return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
+ }
+ if s.Systemd == "true" || s.Systemd == "always" {
+ return mount, fmt.Errorf("cannot use container-init binary with systemd")
+ }
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return mount, errors.Wrap(err, "container-init binary not found on the host")
+ }
+ return mount, nil
+}
+
// Supersede existing mounts in the spec with new, user-specified mounts.
// TODO: Should we unmount subtree mounts? E.g., if /tmp/ is mounted by
// one mount, and we already have /tmp/a and /tmp/b, should we remove
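
Stepping back from the hunk above: the precedence finalizeMounts establishes is image volumes as the base layer, volumes-from entries replacing them, and mounts or volumes given directly on the spec replacing both, with a conflict between a mount and a named volume at the same destination resolved in favor of the user-specified kind. A compact sketch of that layered supersede, using hypothetical destinations:

package main

import "fmt"

func main() {
	// Base layer: destinations declared as VOLUME in the image (hypothetical).
	base := map[string]string{"/data": "image-volume", "/cache": "image-volume"}

	// Middle layer: volumes inherited via volumes-from.
	volumesFrom := map[string]string{"/data": "volumes-from"}

	// Top layer: mounts and volumes the user put directly on the SpecGenerator.
	user := map[string]string{"/cache": "user-mount"}

	// Later layers unconditionally replace earlier ones, as in finalizeMounts.
	for dest, src := range volumesFrom {
		base[dest] = src
	}
	for dest, src := range user {
		base[dest] = src
	}

	fmt.Println(base) // map[/cache:user-mount /data:volumes-from]
}
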
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
index cee49ff51..f0161a793 100644
--- a/pkg/specgen/namespaces.go
+++ b/pkg/specgen/namespaces.go
@@ -159,6 +159,8 @@ func (n *Namespace) validate() error {
func ParseNamespace(ns string) (Namespace, error) {
toReturn := Namespace{}
switch {
+ case ns == "pod":
+ toReturn.NSMode = FromPod
case ns == "host":
toReturn.NSMode = Host
case ns == "private":
@@ -214,6 +216,8 @@ func ParseNetworkNamespace(ns string) (Namespace, []string, error) {
toReturn := Namespace{}
var cniNetworks []string
switch {
+ case ns == "pod":
+ toReturn.NSMode = FromPod
case ns == "bridge":
toReturn.NSMode = Bridge
case ns == "none":
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index 275af1f49..20c8f8800 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -154,14 +154,23 @@ type ContainerStorageConfig struct {
// ImageVolumeMode indicates how image volumes will be created.
// Supported modes are "ignore" (do not create), "tmpfs" (create as
// tmpfs), and "anonymous" (create as anonymous volumes).
- // The default is anonymous.
+ // The default if unset is anonymous.
// Optional.
ImageVolumeMode string `json:"image_volume_mode,omitempty"`
- // VolumesFrom is a list of containers whose volumes will be added to
- // this container. Supported mount options may be added after the
- // container name with a : and include "ro" and "rw".
- // Optional.
+ // VolumesFrom is a set of containers whose volumes will be added to
+ // this container. The name or ID of the container must be provided, and
+ // may optionally be followed by a : and then one or more
+ // comma-separated options. Valid options are 'ro', 'rw', and 'z'.
+ // Options will be used for all volumes sourced from the container.
VolumesFrom []string `json:"volumes_from,omitempty"`
+ // Init specifies that an init binary will be mounted into the
+ // container, and will be used as PID1.
+ Init bool `json:"init,omitempty"`
+ // InitPath specifies the path to the init binary that will be added if
+ // Init is specified above. If not specified, the default set in the
+ // Libpod config will be used. Ignored if Init above is not set.
+ // Optional.
+ InitPath string `json:"init_path,omitempty"`
// Mounts are mounts that will be added to the container.
// These will supersede Image Volumes and VolumesFrom volumes where
// there are conflicts.
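
Tying the specgen changes together, a hedged sketch of server-side usage: build a SpecGenerator with the new Init, VolumesFrom, and ImageVolumeMode fields, then hand it to the updated CompleteSpec/MakeContainer signatures, mirroring what ContainerCreate does in the abi engine. The runtime construction is elided and the container name "web" is hypothetical.

// Package example holds an illustrative, non-authoritative sketch.
package example

import (
	"context"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/specgen"
	"github.com/containers/libpod/pkg/specgen/generate"
)

// createWithInit shows one way the new SpecGenerator fields could be used;
// rt is assumed to be an already-constructed *libpod.Runtime.
func createWithInit(ctx context.Context, rt *libpod.Runtime) (*libpod.Container, error) {
	// The second argument is false because we name an image, not a rootfs.
	s := specgen.NewSpecGenerator("docker.io/library/alpine:latest", false)
	s.Command = []string{"top"}

	// Fields added or reworked by this change.
	s.Init = true                      // mount an init binary and run it as PID 1
	s.VolumesFrom = []string{"web:ro"} // inherit container "web"'s volumes, read-only
	s.ImageVolumeMode = "tmpfs"        // back image VOLUMEs with tmpfs mounts

	if err := generate.CompleteSpec(ctx, rt, s); err != nil {
		return nil, err
	}
	return generate.MakeContainer(ctx, rt, s)
}
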