Diffstat (limited to 'pkg')
-rw-r--r--  pkg/adapter/containers.go          | 179
-rw-r--r--  pkg/adapter/containers_remote.go   | 137
-rw-r--r--  pkg/adapter/runtime.go             |  17
-rw-r--r--  pkg/adapter/runtime_remote.go      |   6
-rw-r--r--  pkg/inspect/inspect.go             |   3
-rw-r--r--  pkg/spec/createconfig.go           |   1
-rw-r--r--  pkg/spec/spec.go                   |  25
-rw-r--r--  pkg/varlinkapi/attach.go           |  14
-rw-r--r--  pkg/varlinkapi/containers.go       |  13
-rw-r--r--  pkg/varlinkapi/events.go           |  10
-rw-r--r--  pkg/varlinkapi/images.go           |   7
11 files changed, 383 insertions(+), 29 deletions(-)
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index a5b911da1..9ec897a60 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -697,3 +697,182 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
}
return pool.Run()
}
+
+// Restart containers with or without a timeout
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+ var (
+ containers []*libpod.Container
+ restartContainers []*libpod.Container
+ err error
+ )
+ useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+ inputTimeout := c.Timeout
+
+ // Handle --latest
+ if c.Latest {
+ lastCtr, err := r.Runtime.GetLatestContainer()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ restartContainers = append(restartContainers, lastCtr)
+ } else if c.Running {
+ containers, err = r.GetRunningContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else if c.All {
+ containers, err = r.Runtime.GetAllContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else {
+ for _, id := range c.InputArgs {
+ ctr, err := r.Runtime.LookupContainer(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, ctr)
+ }
+ }
+
+ maxWorkers := shared.DefaultPoolSize("restart")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalFlags.MaxWorks
+ }
+
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ // We now have a slice of all the containers to be restarted. Iterate them to
+ // create restart Funcs with a timeout as needed
+ pool := shared.NewPool("restart", maxWorkers, len(restartContainers))
+ for _, c := range restartContainers {
+ ctr := c
+ timeout := ctr.StopTimeout()
+ if useTimeout {
+ timeout = inputTimeout
+ }
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.RestartWithTimeout(ctx, timeout)
+ if err != nil {
+ logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// Top displays the running processes of a container
+func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
+ var (
+ descriptors []string
+ container *libpod.Container
+ err error
+ )
+ if cli.Latest {
+ descriptors = cli.InputArgs
+ container, err = r.Runtime.GetLatestContainer()
+ } else {
+ descriptors = cli.InputArgs[1:]
+ container, err = r.Runtime.LookupContainer(cli.InputArgs[0])
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to look up requested container")
+ }
+ return container.Top(descriptors)
+}
+
+// Prune removes stopped containers
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ err error
+ )
+
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ filter := func(c *libpod.Container) bool {
+ state, err := c.State()
+ if err != nil {
+ logrus.Error(err)
+ return false
+ }
+ if c.PodID() != "" {
+ return false
+ }
+ if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
+ return true
+ }
+ return false
+ }
+ delContainers, err := r.Runtime.GetContainers(filter)
+ if err != nil {
+ return ok, failures, err
+ }
+ if len(delContainers) < 1 {
+ return ok, failures, err
+ }
+ pool := shared.NewPool("prune", maxWorkers, len(delContainers))
+ for _, c := range delContainers {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := r.Runtime.RemoveContainer(ctx, ctr, force, false)
+ if err != nil {
+ logrus.Debugf("Failed to prune container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// CleanupContainers cleans up any leftover bits of stopped containers
+func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ return ok, failures, err
+ }
+
+ for _, ctr := range ctrs {
+ if cli.Remove {
+ err = removeContainer(ctx, ctr, r)
+ } else {
+ err = cleanupContainer(ctx, ctr, r)
+ }
+
+ if err == nil {
+ ok = append(ok, ctr.ID())
+ } else {
+ failures[ctr.ID()] = err
+ }
+ }
+ return ok, failures, nil
+}
+
+func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil {
+ return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ }
+ return nil
+}
+
+func cleanupContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
+ if err := ctr.Cleanup(ctx); err != nil {
+ return errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ }
+ return nil
+}
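
Both Restart and Prune above hand per-container closures to shared.NewPool and return its ([]string, map[string]error, error) triple from pool.Run(). The `ctr := c` re-binding before each pool.Add is load-bearing: before Go 1.22 the loop variable was shared across iterations, so without the copy every queued closure would act on the last container. shared.Pool is internal to podman; the following is a minimal, self-contained sketch of the same pattern under that assumption:

```go
package main

import (
	"fmt"
	"sync"
)

// job pairs a container ID with the work to run for it, echoing shared.Job.
type job struct {
	id string
	fn func() error
}

// run executes jobs on at most maxWorkers goroutines and reports successes
// and per-ID failures, the same shape pool.Run() returns above.
func run(jobs []job, maxWorkers int) ([]string, map[string]error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		ok       []string
		failures = map[string]error{}
	)
	sem := make(chan struct{}, maxWorkers) // semaphore bounding concurrency
	for _, j := range jobs {
		j := j // re-bind, as `ctr := c` does above
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem <- struct{}{}
			err := j.fn()
			<-sem
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				failures[j.id] = err
			} else {
				ok = append(ok, j.id)
			}
		}()
	}
	wg.Wait()
	return ok, failures
}

func main() {
	jobs := []job{
		{id: "ctr1", fn: func() error { return nil }},
		{id: "ctr2", fn: func() error { return fmt.Errorf("restart failed") }},
	}
	ok, failures := run(jobs, 2)
	fmt.Println("ok:", ok, "failures:", failures)
}
```
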
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index cb61871bf..a3a48a564 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,12 @@ func (c *Container) ID() string {
return c.config.ID
}
+// Restart a single container
+func (c *Container) Restart(timeout int64) error {
+ _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout)
+ return err
+}
+
// Pause a container
func (c *Container) Pause() error {
_, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
@@ -132,6 +138,23 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
}, nil
}
+// GetAllContainers returns all containers in a slice
+func (r *LocalRuntime) GetAllContainers() ([]*Container, error) {
+ var containers []*Container
+ ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{})
+ if err != nil {
+ return nil, err
+ }
+ for _, ctr := range ctrs {
+ container, err := r.LookupContainer(ctr)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, container)
+ }
+ return containers, nil
+}
+
func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
var containers []*Container
ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
@@ -561,7 +584,10 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
}
inputStream := os.Stdin
if c.NoStdin {
- inputStream = nil
+ inputStream, err = os.Open(os.DevNull)
+ if err != nil {
+ return err
+ }
}
errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false, c.DetachKeys)
if err != nil {
@@ -753,3 +779,112 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
}
return ok, failures, nil
}
+
+// Restart restarts one or more containers over varlink
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+ var (
+ containers []*Container
+ restartContainers []*Container
+ err error
+ ok = []string{}
+ failures = map[string]error{}
+ )
+ useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+ inputTimeout := c.Timeout
+
+ if c.Latest {
+ lastCtr, err := r.GetLatestContainer()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ restartContainers = append(restartContainers, lastCtr)
+ } else if c.Running {
+ containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else if c.All {
+ containers, err = r.GetAllContainers()
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, containers...)
+ } else {
+ for _, id := range c.InputArgs {
+ ctr, err := r.LookupContainer(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ restartContainers = append(restartContainers, ctr)
+ }
+ }
+
+ for _, c := range restartContainers {
+ c := c
+ timeout := c.config.StopTimeout
+ if useTimeout {
+ timeout = inputTimeout
+ }
+ err := c.Restart(int64(timeout))
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// Top displays the running processes of a container
+func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
+ var (
+ ctr *Container
+ err error
+ descriptors []string
+ )
+ if cli.Latest {
+ ctr, err = r.GetLatestContainer()
+ descriptors = cli.InputArgs
+ } else {
+ ctr, err = r.LookupContainer(cli.InputArgs[0])
+ descriptors = cli.InputArgs[1:]
+ }
+ if err != nil {
+ return nil, err
+ }
+ return iopodman.Top().Call(r.Conn, ctr.ID(), descriptors)
+}
+
+// Prune removes stopped containers
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ filters := []string{libpod.ContainerStateExited.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ if err != nil {
+ return ok, failures, err
+ }
+ for _, c := range ctrs {
+ c := c
+ _, err := iopodman.RemoveContainer().Call(r.Conn, c.ID(), false, false)
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// CleanupContainers cleans up any leftover bits of stopped containers; not supported by remote clients
+func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) {
+ return nil, nil, errors.New("container cleanup not supported for remote clients")
+}
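
The Attach change above replaces a nil input stream with a handle to /dev/null: the attach plumbing reads from the stream unconditionally, so a nil *os.File would be dereferenced, whereas /dev/null yields immediate EOF. A minimal sketch of that substitution (openInput is a hypothetical helper, not podman API):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// openInput returns stdin, or an always-EOF stream when stdin is disabled,
// mirroring the NoStdin fix above: passing nil would crash downstream
// readers, while /dev/null reads cleanly as an empty stream.
func openInput(noStdin bool) (*os.File, error) {
	if !noStdin {
		return os.Stdin, nil
	}
	return os.Open(os.DevNull)
}

func main() {
	in, err := openInput(true)
	if err != nil {
		panic(err)
	}
	defer in.Close()
	data, _ := ioutil.ReadAll(in) // immediate EOF: zero bytes read
	fmt.Printf("read %d bytes\n", len(data))
}
```
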
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 753f7c944..0d840d65b 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -5,6 +5,7 @@ package adapter
import (
"bufio"
"context"
+ "github.com/containers/libpod/cmd/podman/shared"
"io"
"io/ioutil"
"os"
@@ -17,7 +18,6 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
@@ -57,8 +57,8 @@ type Volume struct {
type VolumeFilter func(*Volume) bool
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
- runtime, err := libpodruntime.GetRuntime(c)
+func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ runtime, err := libpodruntime.GetRuntime(ctx, c)
if err != nil {
return nil, err
}
@@ -119,8 +119,8 @@ func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, for
}
// PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
+ return r.ImageRuntime().PruneImages(ctx, all)
}
// Export is a wrapper to container export to a tarfile
@@ -322,10 +322,6 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
fromStart bool
eventsError error
)
- options, err := shared.GenerateEventOptions(c.Filter, c.Since, c.Until)
- if err != nil {
- return errors.Wrapf(err, "unable to generate event options")
- }
tmpl, err := template.New("events").Parse(c.Format)
if err != nil {
return err
@@ -335,7 +331,8 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
}
eventChannel := make(chan *events.Event)
go func() {
- eventsError = r.Runtime.Events(fromStart, c.Stream, options, eventChannel)
+ readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until}
+ eventsError = r.Runtime.Events(readOpts)
}()
if eventsError != nil {
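
The Events rewrite above collapses the positional parameters of Runtime.Events into a single events.ReadOptions struct. A stand-in sketch (field names follow the diff; the types here are illustrative, not libpod's) of why the struct form reads better and extends without breaking callers:

```go
package main

import "fmt"

// Event and ReadOptions are illustrative stand-ins; the field names follow
// the diff above, but the real types live in libpod's events package.
type Event struct{ Name string }

type ReadOptions struct {
	FromStart    bool
	Stream       bool
	Filters      []string
	EventChannel chan *Event
	Since        string
	Until        string
}

// Events takes one options struct instead of four positional arguments, so
// call sites are self-describing and new fields (Since, Until) can be added
// without touching existing callers.
func Events(opts ReadOptions) error {
	defer close(opts.EventChannel)
	opts.EventChannel <- &Event{Name: "container start"}
	return nil
}

func main() {
	ch := make(chan *Event, 1)
	err := Events(ReadOptions{FromStart: true, Filters: []string{"container=web"}, EventChannel: ch})
	ev := <-ch
	fmt.Println(ev.Name, err)
}
```
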
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index dcb0924ce..6102daccf 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -46,7 +46,7 @@ type LocalRuntime struct {
}
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime := RemoteRuntime{}
conn, err := runtime.Connect()
if err != nil {
@@ -256,7 +256,7 @@ func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authf
// IsParent goes through the layers in the store and checks if i.TopLayer is
// the parent of any other layer in store. Double check that image with that
// layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
+func (ci *ContainerImage) IsParent(context.Context) (bool, error) {
return ci.remoteImage.isParent, nil
}
@@ -338,7 +338,7 @@ func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error)
}
// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
return iopodman.ImagesPrune().Call(r.Conn, all)
}
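
runtime_remote.go mirrors the signature changes in runtime.go: GetRuntime, IsParent, and PruneImages all gain a context.Context even though the remote implementations ignore it, which keeps both backends satisfying one interface. A minimal sketch of that pattern, with hypothetical interface and type names:

```go
package main

import (
	"context"
	"fmt"
)

// ImagePruner is a hypothetical interface both backends satisfy; the ctx
// parameter is part of the contract even when an implementation ignores it.
type ImagePruner interface {
	PruneImages(ctx context.Context, all bool) ([]string, error)
}

type localRuntime struct{}

func (localRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
	// The local backend can honor cancellation mid-operation.
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return []string{"sha256:deadbeef"}, nil
}

type remoteRuntime struct{}

func (remoteRuntime) PruneImages(_ context.Context, all bool) ([]string, error) {
	// The varlink call manages its own lifetime, so ctx is accepted but unused.
	return []string{"sha256:cafef00d"}, nil
}

func main() {
	for _, r := range []ImagePruner{localRuntime{}, remoteRuntime{}} {
		ids, _ := r.PruneImages(context.Background(), true)
		fmt.Println(ids)
	}
}
```
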
diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go
index 270e431ad..6978370ef 100644
--- a/pkg/inspect/inspect.go
+++ b/pkg/inspect/inspect.go
@@ -38,7 +38,8 @@ type HostConfig struct {
PidMode string `json:"PidMode"`
Privileged bool `json:"Privileged"`
PublishAllPorts bool `json:"PublishAllPorts"` //TODO
- ReadonlyRootfs bool `json:"ReadonlyRootfs"`
+ ReadOnlyRootfs bool `json:"ReadonlyRootfs"`
+ ReadOnlyTmpfs bool `json:"ReadonlyTmpfs"`
SecurityOpt []string `json:"SecurityOpt"`
UTSMode string `json:"UTSMode"`
UsernsMode string `json:"UsernsMode"`
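
Note the inspect.go rename touches only the Go identifier: ReadonlyRootfs becomes ReadOnlyRootfs while the JSON tag keeps the Docker-compatible spelling, so inspect output keys are unchanged, and the new ReadOnlyTmpfs field follows the same convention. A toy illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Go field names use idiomatic CamelCase; the tags preserve the
// Docker-compatible JSON spelling, so consumers see unchanged keys.
type HostConfig struct {
	ReadOnlyRootfs bool `json:"ReadonlyRootfs"`
	ReadOnlyTmpfs  bool `json:"ReadonlyTmpfs"`
}

func main() {
	out, _ := json.Marshal(HostConfig{ReadOnlyRootfs: true, ReadOnlyTmpfs: true})
	fmt.Println(string(out)) // {"ReadonlyRootfs":true,"ReadonlyTmpfs":true}
}
```
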
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index e71d9d3db..064dedd45 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -113,6 +113,7 @@ type CreateConfig struct {
PublishAll bool //publish-all
Quiet bool //quiet
ReadOnlyRootfs bool //read-only
+ ReadOnlyTmpfs bool //read-only-tmpfs
Resources CreateResourceConfig
Rm bool //rm
StopSignal syscall.Signal // stop-signal
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 0371b6d4d..4cbed0ea4 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -341,6 +341,31 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
}
}
+ if config.ReadOnlyRootfs && config.ReadOnlyTmpfs {
+ options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
+ for _, i := range []string{"/tmp", "/var/tmp"} {
+ if libpod.MountExists(g.Config.Mounts, i) {
+ continue
+ }
+ // Default options if nothing passed
+ tmpfsMnt := spec.Mount{
+ Destination: i,
+ Type: "tmpfs",
+ Source: "tmpfs",
+ Options: options,
+ }
+ g.AddMount(tmpfsMnt)
+ }
+ if !libpod.MountExists(g.Config.Mounts, "/run") {
+ tmpfsMnt := spec.Mount{
+ Destination: "/run",
+ Type: "tmpfs",
+ Source: "tmpfs",
+ Options: append(options, "noexec", "size=65536k"),
+ }
+ g.AddMount(tmpfsMnt)
+ }
+ }
for name, val := range config.Env {
g.AddProcessEnv(name, val)
}
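
The spec.go hunk is what makes --read-only practical: when both ReadOnlyRootfs and ReadOnlyTmpfs are set, writable tmpfs mounts are overlaid on /tmp, /var/tmp, and /run unless the user already mounted something at those destinations, with /run additionally getting noexec and a 64 MiB size cap. A standalone sketch of the same construction using the runtime-spec types (mountExists here is a simplified stand-in for libpod.MountExists):

```go
package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// mountExists is a simplified stand-in for libpod.MountExists: it checks
// only destinations, which is all the hunk above relies on.
func mountExists(mounts []spec.Mount, dest string) bool {
	for _, m := range mounts {
		if m.Destination == dest {
			return true
		}
	}
	return false
}

// addReadOnlyTmpfs overlays writable tmpfs mounts on the usual scratch
// directories so a read-only rootfs stays usable, skipping any destination
// the user already claimed.
func addReadOnlyTmpfs(mounts []spec.Mount) []spec.Mount {
	options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
	for _, dest := range []string{"/tmp", "/var/tmp"} {
		if mountExists(mounts, dest) {
			continue // respect user-supplied mounts
		}
		mounts = append(mounts, spec.Mount{
			Destination: dest,
			Type:        "tmpfs",
			Source:      "tmpfs",
			Options:     options,
		})
	}
	if !mountExists(mounts, "/run") {
		mounts = append(mounts, spec.Mount{
			Destination: "/run",
			Type:        "tmpfs",
			Source:      "tmpfs",
			Options:     append(options, "noexec", "size=65536k"),
		})
	}
	return mounts
}

func main() {
	for _, m := range addReadOnlyTmpfs(nil) {
		fmt.Println(m.Destination, m.Options)
	}
}
```
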
diff --git a/pkg/varlinkapi/attach.go b/pkg/varlinkapi/attach.go
index 9e2a265be..6c62d3514 100644
--- a/pkg/varlinkapi/attach.go
+++ b/pkg/varlinkapi/attach.go
@@ -53,7 +53,13 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
-
+ state, err := ctr.State()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ if !start && state != libpod.ContainerStateRunning {
+ return call.ReplyErrorOccurred("container must be running to attach")
+ }
reader, writer, _, pw, streams := setupStreams(call)
go func() {
@@ -62,10 +68,10 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
}
}()
- if start {
- finalErr = startAndAttach(ctr, streams, detachKeys, resize, errChan)
- } else {
+ if state == libpod.ContainerStateRunning {
finalErr = attach(ctr, streams, detachKeys, resize, errChan)
+ } else {
+ finalErr = startAndAttach(ctr, streams, detachKeys, resize, errChan)
}
if finalErr != libpod.ErrDetach && finalErr != nil {
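
The attach.go change front-loads a state check: a plain attach on a non-running container is rejected with an explicit error instead of failing deeper in the stream setup, and the running state (rather than the start flag alone) now selects between attach and startAndAttach. A compact sketch of that branch with hypothetical state and action names:

```go
package main

import (
	"errors"
	"fmt"
)

type state int

const (
	stateRunning state = iota
	stateStopped
)

// decide mirrors the branch above: without start, a non-running container
// is an error; otherwise the current state picks attach vs. start-and-attach.
func decide(start bool, s state) (string, error) {
	if !start && s != stateRunning {
		return "", errors.New("container must be running to attach")
	}
	if s == stateRunning {
		return "attach", nil
	}
	return "startAndAttach", nil
}

func main() {
	for _, tc := range []struct {
		start bool
		s     state
	}{{false, stateRunning}, {false, stateStopped}, {true, stateStopped}} {
		action, err := decide(tc.start, tc.s)
		fmt.Println(action, err)
	}
}
```
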
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 237407050..872c7bc26 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -733,3 +733,16 @@ func newPodmanLogLine(line *libpod.LogLine) iopodman.LogLine {
Cid: line.CID,
}
}
+
+// Top displays information about a container's running processes
+func (i *LibpodAPI) Top(call iopodman.VarlinkCall, nameOrID string, descriptors []string) error {
+ ctr, err := i.Runtime.LookupContainer(nameOrID)
+ if err != nil {
+ return call.ReplyContainerNotFound(nameOrID, err.Error())
+ }
+ topInfo, err := ctr.Top(descriptors)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ return call.ReplyTop(topInfo)
+}
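
Endpoints like Top return one tab-separated string per process row, header first, which the CLI side then columnates. An illustrative formatter for such a reply (the rows below are stand-ins, not real output):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

// printTop columnates tab-separated rows as returned by Top-style calls.
func printTop(rows []string) error {
	w := tabwriter.NewWriter(os.Stdout, 5, 1, 3, ' ', 0)
	for _, row := range rows {
		if _, err := fmt.Fprintln(w, row); err != nil {
			return err
		}
	}
	return w.Flush()
}

func main() {
	_ = printTop([]string{
		"USER\tPID\tPPID\t%CPU",
		"root\t1\t0\t0.0",
	})
}
```
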
diff --git a/pkg/varlinkapi/events.go b/pkg/varlinkapi/events.go
index 1e5696fbe..f9a9d9321 100644
--- a/pkg/varlinkapi/events.go
+++ b/pkg/varlinkapi/events.go
@@ -6,7 +6,6 @@ import (
"fmt"
"time"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod/events"
)
@@ -23,19 +22,16 @@ func (i *LibpodAPI) GetEvents(call iopodman.VarlinkCall, filter []string, since
stream = true
call.Continues = true
}
- filters, err := shared.GenerateEventOptions(filter, since, until)
- if err != nil {
- return call.ReplyErrorOccurred(err.Error())
- }
if len(since) > 0 || len(until) > 0 {
fromStart = true
}
eventChannel := make(chan *events.Event)
go func() {
- eventsError = i.Runtime.Events(fromStart, stream, filters, eventChannel)
+ readOpts := events.ReadOptions{FromStart: fromStart, Stream: stream, Filters: filter, EventChannel: eventChannel}
+ eventsError = i.Runtime.Events(readOpts)
}()
if eventsError != nil {
- return call.ReplyErrorOccurred(err.Error())
+ return call.ReplyErrorOccurred(eventsError.Error())
}
for {
event = <-eventChannel
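
The events.go hunk also fixes the reply to report eventsError, which the goroutine actually writes, instead of the stale err. Checking eventsError immediately after `go func()` remains racy, though: the producer has usually not failed yet at that point. One way to surface the producer's failure deterministically is an error channel instead of a shared variable; a sketch under illustrative names:

```go
package main

import (
	"errors"
	"fmt"
)

type Event struct{ Name string }

// streamEvents is an illustrative producer; a real implementation would
// call the runtime's event reader and close both channels when it returns.
func streamEvents(eventCh chan<- *Event, errCh chan<- error) {
	defer close(eventCh)
	defer close(errCh)
	eventCh <- &Event{Name: "start"}
	errCh <- errors.New("journald went away")
}

func main() {
	eventCh := make(chan *Event)
	errCh := make(chan error, 1) // buffered: producer never blocks on error
	go streamEvents(eventCh, errCh)
	for ev := range eventCh {
		fmt.Println("event:", ev.Name)
	}
	if err := <-errCh; err != nil {
		fmt.Println("stream failed:", err)
	}
}
```
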
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 470eadaeb..cecddf6b3 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -4,6 +4,7 @@ package varlinkapi
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -49,7 +50,7 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
}
size, _ := image.Size(getContext())
- isParent, err := image.IsParent()
+ isParent, err := image.IsParent(context.TODO())
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
@@ -503,7 +504,7 @@ func (i *LibpodAPI) DeleteUnusedImages(call iopodman.VarlinkCall) error {
return call.ReplyErrorOccurred(err.Error())
}
if len(containers) == 0 {
- if err := img.Remove(false); err != nil {
+ if err := img.Remove(context.TODO(), false); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
deletedImages = append(deletedImages, img.ID())
@@ -739,7 +740,7 @@ func (i *LibpodAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman.
// ImagesPrune ....
func (i *LibpodAPI) ImagesPrune(call iopodman.VarlinkCall, all bool) error {
- prunedImages, err := i.Runtime.ImageRuntime().PruneImages(all)
+ prunedImages, err := i.Runtime.ImageRuntime().PruneImages(context.TODO(), all)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
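
Throughout the varlink handlers, context.TODO() fills the new context parameters where the call chain does not yet carry a real context. Per the standard library's documentation, TODO behaves like Background but marks the call site as incomplete plumbing; a short illustration:

```go
package main

import (
	"context"
	"fmt"
)

// needsCtx stands in for the image-layer calls above whose signatures now
// require a context.Context.
func needsCtx(ctx context.Context) error {
	return ctx.Err() // nil unless the context has been canceled
}

func main() {
	// context.TODO() behaves like context.Background() but documents that
	// the surrounding code has not had a real context threaded through yet,
	// which is exactly the state of the varlink handlers above.
	fmt.Println(needsCtx(context.TODO()))
}
```
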