summaryrefslogtreecommitdiff
path: root/cmd
diff options
context:
space:
mode:
Diffstat (limited to 'cmd')
-rw-r--r--cmd/podman/checkpoint.go25
-rw-r--r--cmd/podman/cleanup.go40
-rw-r--r--cmd/podman/common.go67
-rw-r--r--cmd/podman/create.go29
-rw-r--r--cmd/podman/create_cli_test.go2
-rw-r--r--cmd/podman/kill.go48
-rw-r--r--cmd/podman/parse.go10
-rw-r--r--cmd/podman/pod_create.go18
-rw-r--r--cmd/podman/port.go3
-rw-r--r--cmd/podman/ps.go377
-rw-r--r--cmd/podman/restore.go28
-rw-r--r--cmd/podman/rm.go33
-rw-r--r--cmd/podman/shared/container.go260
-rw-r--r--cmd/podman/stats.go7
-rw-r--r--cmd/podman/stop.go48
-rw-r--r--cmd/podman/utils.go11
16 files changed, 561 insertions, 445 deletions
diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go
index 8582ce138..bf280920d 100644
--- a/cmd/podman/checkpoint.go
+++ b/cmd/podman/checkpoint.go
@@ -6,6 +6,7 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -22,6 +23,11 @@ var (
Name: "keep, k",
Usage: "keep all temporary checkpoint files",
},
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "checkpoint all running containers",
+ },
+ LatestFlag,
}
checkpointCommand = cli.Command{
Name: "checkpoint",
@@ -45,21 +51,14 @@ func checkpointCmd(c *cli.Context) error {
defer runtime.Shutdown(false)
keep := c.Bool("keep")
- args := c.Args()
- if len(args) < 1 {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- var lastError error
- for _, arg := range args {
- ctr, err := runtime.LookupContainer(arg)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "error looking up container %q", arg)
- continue
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+
+ for _, ctr := range containers {
if err = ctr.Checkpoint(context.TODO(), keep); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
diff --git a/cmd/podman/cleanup.go b/cmd/podman/cleanup.go
index 3fd150783..bc4af9f50 100644
--- a/cmd/podman/cleanup.go
+++ b/cmd/podman/cleanup.go
@@ -5,7 +5,6 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
@@ -44,43 +43,14 @@ func cleanupCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- args := c.Args()
+ if err := checkAllAndLatest(c); err != nil {
+ return err
+ }
+
+ cleanupContainers, lastError := getAllOrLatestContainers(c, runtime, -1, "all")
ctx := getContext()
- var lastError error
- var cleanupContainers []*libpod.Container
- if c.Bool("all") {
- if c.Bool("lastest") {
- return errors.New("--all and --latest cannot be used together")
- }
- if len(args) != 0 {
- return errors.New("--all and explicit container IDs cannot be used together")
- }
- cleanupContainers, err = runtime.GetContainers()
- if err != nil {
- return errors.Wrapf(err, "unable to get container list")
- }
- } else if c.Bool("latest") {
- if len(args) != 0 {
- return errors.New("--latest and explicit container IDs cannot be used together")
- }
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get latest container")
- }
- cleanupContainers = append(cleanupContainers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- cleanupContainers = append(cleanupContainers, container)
- }
- }
for _, ctr := range cleanupContainers {
if err = ctr.Cleanup(ctx); err != nil {
if lastError != nil {
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 8ae1c9e0f..f9e746b28 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -89,6 +89,73 @@ func validateFlags(c *cli.Context, flags []cli.Flag) error {
return nil
}
+// checkAllAndLatest checks that --all and --latest are used correctly
+func checkAllAndLatest(c *cli.Context) error {
+ argLen := len(c.Args())
+ if (c.Bool("all") || c.Bool("latest")) && argLen > 0 {
+ return errors.Errorf("no arguments are needed with --all or --latest")
+ }
+ if c.Bool("all") && c.Bool("latest") {
+ return errors.Errorf("--all and --latest cannot be used together")
+ }
+ if argLen < 1 && !c.Bool("all") && !c.Bool("latest") {
+ return errors.Errorf("you must provide at least one pod name or id")
+ }
+ return nil
+}
+
+// getAllOrLatestContainers tries to return the correct list of containers
+// depending if --all, --latest or <container-id> is used.
+// It requires the Context (c) and the Runtime (runtime). As different
+// commands are using different container state for the --all option
+// the desired state has to be specified in filterState. If no filter
+// is desired a -1 can be used to get all containers. For a better
+// error message, if the filter fails, a corresponding verb can be
+// specified which will then appear in the error message.
+func getAllOrLatestContainers(c *cli.Context, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
+ var containers []*libpod.Container
+ var lastError error
+ var err error
+ if c.Bool("all") {
+ if filterState != -1 {
+ var filterFuncs []libpod.ContainerFilter
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == filterState
+ })
+ containers, err = runtime.GetContainers(filterFuncs...)
+ } else {
+ containers, err = runtime.GetContainers()
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get %s containers", verb)
+ }
+ } else if c.Bool("latest") {
+ lastCtr, err := runtime.GetLatestContainer()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ containers = append(containers, lastCtr)
+ } else {
+ args := c.Args()
+ for _, i := range args {
+ container, err := runtime.LookupContainer(i)
+ if err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to find container %s", i)
+ }
+ if container != nil {
+ // This is here to make sure this does not return [<nil>] but only nil
+ containers = append(containers, container)
+ }
+ }
+ }
+
+ return containers, lastError
+}
+
// getContext returns a non-nil, empty context
func getContext() context.Context {
return context.TODO()
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index 248ff1b7d..9f6825c95 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -95,15 +95,6 @@ func createInit(c *cli.Context) error {
return err
}
- if c.String("cidfile") != "" {
- if _, err := os.Stat(c.String("cidfile")); err == nil {
- return errors.Errorf("container id file exists. ensure another container is not using it or delete %s", c.String("cidfile"))
- }
- if err := libpod.WriteFile("", c.String("cidfile")); err != nil {
- return errors.Wrapf(err, "unable to write cidfile %s", c.String("cidfile"))
- }
- }
-
if len(c.Args()) < 1 {
return errors.Errorf("image name or ID is required")
}
@@ -119,6 +110,20 @@ func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container
rootfs = c.Args()[0]
}
+ var err error
+ var cidFile *os.File
+ if c.IsSet("cidfile") && os.Geteuid() == 0 {
+ cidFile, err = libpod.OpenExclusiveFile(c.String("cidfile"))
+ if err != nil && os.IsExist(err) {
+ return nil, nil, errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", c.String("cidfile"))
+ }
+ if err != nil {
+ return nil, nil, errors.Errorf("error opening cidfile %s", c.String("cidfile"))
+ }
+ defer cidFile.Close()
+ defer cidFile.Sync()
+ }
+
imageName := ""
var data *inspect.ImageData = nil
@@ -171,12 +176,14 @@ func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container
return nil, nil, err
}
- if c.String("cidfile") != "" {
- err := libpod.WriteFile(ctr.ID(), c.String("cidfile"))
+ if cidFile != nil {
+ _, err = cidFile.WriteString(ctr.ID())
if err != nil {
logrus.Error(err)
}
+
}
+
logrus.Debugf("New container created %q", ctr.ID())
return ctr, createConfig, nil
}
diff --git a/cmd/podman/create_cli_test.go b/cmd/podman/create_cli_test.go
index fa128c8e6..9db007ff3 100644
--- a/cmd/podman/create_cli_test.go
+++ b/cmd/podman/create_cli_test.go
@@ -47,7 +47,7 @@ func TestGetAllLabels(t *testing.T) {
}
func TestGetAllLabelsBadKeyValue(t *testing.T) {
- inLabels := []string{"ONE1", "TWO=2"}
+ inLabels := []string{"=badValue", "="}
fileLabels := []string{}
_, err := getAllLabels(fileLabels, inLabels)
assert.Error(t, err, assert.AnError)
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index 56dd170b5..7ca5bd7c5 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -41,19 +41,10 @@ var (
// killCmd kills one or more containers with a signal
func killCmd(c *cli.Context) error {
- args := c.Args()
- if (!c.Bool("all") && !c.Bool("latest")) && len(args) == 0 {
- return errors.Errorf("you must specify one or more containers to kill")
- }
- if (c.Bool("all") || c.Bool("latest")) && len(args) > 0 {
- return errors.Errorf("you cannot specify any containers to kill with --latest or --all")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if len(args) < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one container name or id")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
+
if err := validateFlags(c, killFlags); err != nil {
return err
}
@@ -76,38 +67,7 @@ func killCmd(c *cli.Context) error {
killSignal = uint(sysSignal)
}
- var filterFuncs []libpod.ContainerFilter
- var containers []*libpod.Container
- var lastError error
- if c.Bool("all") {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- return state == libpod.ContainerStateRunning
- })
- containers, err = runtime.GetContainers(filterFuncs...)
- if err != nil {
- return errors.Wrapf(err, "unable to get running containers")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get last created container")
- }
- containers = append(containers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- containers = append(containers, container)
- }
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
for _, ctr := range containers {
if err := ctr.Kill(killSignal); err != nil {
diff --git a/cmd/podman/parse.go b/cmd/podman/parse.go
index ade592ddf..2e4959656 100644
--- a/cmd/podman/parse.go
+++ b/cmd/podman/parse.go
@@ -198,6 +198,11 @@ func readKVStrings(env map[string]string, files []string, override []string) err
func parseEnv(env map[string]string, line string) error {
data := strings.SplitN(line, "=", 2)
+ // catch invalid variables such as "=" or "=A"
+ if data[0] == "" {
+ return errors.Errorf("invalid environment variable: %q", line)
+ }
+
// trim the front of a variable, but nothing else
name := strings.TrimLeft(data[0], whiteSpaces)
if strings.ContainsAny(name, whiteSpaces) {
@@ -208,10 +213,7 @@ func parseEnv(env map[string]string, line string) error {
env[name] = data[1]
} else {
// if only a pass-through variable is given, clean it up.
- val, exists := os.LookupEnv(name)
- if !exists {
- return errors.Errorf("environment variable %q does not exist", name)
- }
+ val, _ := os.LookupEnv(name)
env[name] = val
}
return nil
diff --git a/cmd/podman/pod_create.go b/cmd/podman/pod_create.go
index c3a45a093..63fa6b294 100644
--- a/cmd/podman/pod_create.go
+++ b/cmd/podman/pod_create.go
@@ -90,13 +90,17 @@ func podCreateCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- if c.IsSet("pod-id-file") {
- if _, err = os.Stat(c.String("pod-id-file")); err == nil {
- return errors.Errorf("pod id file exists. ensure another pod is not using it or delete %s", c.String("pod-id-file"))
+ var podIdFile *os.File
+ if c.IsSet("pod-id-file") && os.Geteuid() == 0 {
+ podIdFile, err = libpod.OpenExclusiveFile(c.String("pod-id-file"))
+ if err != nil && os.IsExist(err) {
+ return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", c.String("pod-id-file"))
}
- if err = libpod.WriteFile("", c.String("pod-id-file")); err != nil {
- return errors.Wrapf(err, "unable to write pod id file %s", c.String("pod-id-file"))
+ if err != nil {
+ return errors.Errorf("error opening pod-id-file %s", c.String("pod-id-file"))
}
+ defer podIdFile.Close()
+ defer podIdFile.Sync()
}
if !c.BoolT("infra") && c.IsSet("share") && c.String("share") != "none" && c.String("share") != "" {
return errors.Errorf("You cannot share kernel namespaces on the pod level without an infra container")
@@ -137,8 +141,8 @@ func podCreateCmd(c *cli.Context) error {
return err
}
- if c.IsSet("pod-id-file") {
- err = libpod.WriteFile(pod.ID(), c.String("pod-id-file"))
+ if podIdFile != nil {
+ _, err = podIdFile.WriteString(pod.ID())
if err != nil {
logrus.Error(err)
}
diff --git a/cmd/podman/port.go b/cmd/podman/port.go
index d6497d450..3355e751b 100644
--- a/cmd/podman/port.go
+++ b/cmd/podman/port.go
@@ -104,6 +104,9 @@ func portCmd(c *cli.Context) error {
containers = append(containers, container)
} else if c.Bool("latest") {
container, err = runtime.GetLatestContainer()
+ if err != nil {
+ return errors.Wrapf(err, "unable to get last created container")
+ }
containers = append(containers, container)
} else {
containers, err = runtime.GetRunningContainers()
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 32b3a0574..a468f6121 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -1,11 +1,15 @@
package main
import (
+ "encoding/json"
"fmt"
+ "html/template"
+ "os"
"reflect"
"sort"
"strconv"
"strings"
+ "text/tabwriter"
"time"
"github.com/containers/libpod/cmd/podman/formats"
@@ -16,12 +20,31 @@ import (
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"k8s.io/apimachinery/pkg/fields"
)
-const mountTruncLength = 12
+const (
+ mountTruncLength = 12
+ hid = "CONTAINER ID"
+ himage = "IMAGE"
+ hcommand = "COMMAND"
+ hcreated = "CREATED"
+ hstatus = "STATUS"
+ hports = "PORTS"
+ hnames = "NAMES"
+ hsize = "SIZE"
+ hinfra = "IS INFRA"
+ hpod = "POD"
+ nspid = "PID"
+ nscgroup = "CGROUPNS"
+ nsipc = "IPC"
+ nsmnt = "MNT"
+ nsnet = "NET"
+ nspidns = "PIDNS"
+ nsuserns = "USERNS"
+ nsuts = "UTS"
+)
type psTemplateParams struct {
ID string
@@ -76,7 +99,7 @@ type psJSONParams struct {
}
// Type declaration and functions for sorting the PS output
-type psSorted []psJSONParams
+type psSorted []shared.PsContainerOutput
func (a psSorted) Len() int { return len(a) }
func (a psSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
@@ -84,7 +107,7 @@ func (a psSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type psSortedCommand struct{ psSorted }
func (a psSortedCommand) Less(i, j int) bool {
- return strings.Join(a.psSorted[i].Command, " ") < strings.Join(a.psSorted[j].Command, " ")
+ return a.psSorted[i].Command < a.psSorted[j].Command
}
type psSortedCreated struct{ psSorted }
@@ -201,6 +224,11 @@ var (
)
func psCmd(c *cli.Context) error {
+ var (
+ filterFuncs []libpod.ContainerFilter
+ outputContainers []*libpod.Container
+ )
+
if err := validateFlags(c, psFlags); err != nil {
return err
}
@@ -220,11 +248,9 @@ func psCmd(c *cli.Context) error {
return errors.Errorf("too many arguments, ps takes no arguments")
}
- format := genPsFormat(c.String("format"), c.Bool("quiet"), c.Bool("size"), c.Bool("namespace"), c.Bool("pod"), c.Bool("all"))
-
opts := shared.PsOptions{
All: c.Bool("all"),
- Format: format,
+ Format: c.String("format"),
Last: c.Int("last"),
Latest: c.Bool("latest"),
NoTrunc: c.Bool("no-trunc"),
@@ -235,18 +261,6 @@ func psCmd(c *cli.Context) error {
Sort: c.String("sort"),
}
- var filterFuncs []libpod.ContainerFilter
- // When we are dealing with latest or last=n, we need to
- // get all containers.
- if !opts.All && !opts.Latest && opts.Last < 1 {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- // Don't return infra containers
- return state == libpod.ContainerStateRunning && !c.IsInfra()
- })
- }
-
filters := c.StringSlice("filter")
if len(filters) > 0 {
for _, f := range filters {
@@ -262,8 +276,6 @@ func psCmd(c *cli.Context) error {
}
}
- var outputContainers []*libpod.Container
-
if !opts.Latest {
// Get all containers
containers, err := runtime.GetContainers(filterFuncs...)
@@ -288,7 +300,92 @@ func psCmd(c *cli.Context) error {
outputContainers = []*libpod.Container{latestCtr}
}
- return generatePsOutput(outputContainers, opts)
+ pss := shared.PBatch(outputContainers, 8, opts)
+ if opts.Sort != "" {
+ pss, err = sortPsOutput(opts.Sort, pss)
+ if err != nil {
+ return err
+ }
+ }
+
+ // If quiet, print only cids and return
+ if opts.Quiet {
+ return printQuiet(pss)
+ }
+
+ // If the user wants their own GO template format
+ if opts.Format != "" {
+ if opts.Format == "json" {
+ return dumpJSON(pss)
+ }
+ return printFormat(opts.Format, pss)
+ }
+
+ // Define a tab writer with stdout as the output
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ defer w.Flush()
+
+ // Output standard PS headers
+ if !opts.Namespace {
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s", hid, himage, hcommand, hcreated, hstatus, hports, hnames)
+ // If the user does not want size OR pod info, we print the isInfra bool
+ if !opts.Size && !opts.Pod {
+ fmt.Fprintf(w, "\t%s", hinfra)
+ }
+ // User wants pod info
+ if opts.Pod {
+ fmt.Fprintf(w, "\t%s", hpod)
+ }
+ //User wants size info
+ if opts.Size {
+ fmt.Fprintf(w, "\t%s", hsize)
+ }
+ } else {
+ // Output Namespace headers
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s", hid, hnames, nspid, nscgroup, nsipc, nsmnt, nsnet, nspidns, nsuserns, nsuts)
+ }
+
+ // Now iterate each container and output its information
+ for _, container := range pss {
+
+ // Standard PS output
+ if !opts.Namespace {
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s", container.ID, container.Image, container.Command, container.Created, container.Status, container.Ports, container.Names)
+
+ // If not size and not pod info, do isInfra
+ if !opts.Size && !opts.Pod {
+ fmt.Fprintf(w, "\t%t", container.IsInfra)
+ }
+ // User wants pod info
+ if opts.Pod {
+ fmt.Fprintf(w, "\t%s", container.Pod)
+ }
+ //User wants size info
+ if opts.Size {
+ var size string
+ if container.Size == nil {
+ size = units.HumanSizeWithPrecision(0, 0)
+ } else {
+ size = units.HumanSizeWithPrecision(float64(container.Size.RwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(container.Size.RootFsSize), 3) + ")"
+ fmt.Fprintf(w, "\t%s", size)
+ }
+ }
+
+ } else {
+ // Print namespace information
+ ns := shared.GetNamespaces(container.Pid)
+ fmt.Fprintf(w, "\n%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", container.ID, container.Names, container.Pid, ns.Cgroup, ns.IPC, ns.MNT, ns.NET, ns.PIDNS, ns.User, ns.UTS)
+ }
+
+ }
+ return nil
+}
+
+func printQuiet(containers []shared.PsContainerOutput) error {
+ for _, c := range containers {
+ fmt.Println(c.ID)
+ }
+ return nil
}
// checkFlagsPassed checks if mutually exclusive flags are passed together
@@ -420,47 +517,6 @@ func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Ru
return nil, errors.Errorf("%s is an invalid filter", filter)
}
-// generate the template based on conditions given
-func genPsFormat(format string, quiet, size, namespace, pod, infra bool) string {
- if format != "" {
- // "\t" from the command line is not being recognized as a tab
- // replacing the string "\t" to a tab character if the user passes in "\t"
- return strings.Replace(format, `\t`, "\t", -1)
- }
- if quiet {
- return formats.IDString
- }
- podappend := ""
- if pod {
- podappend = "{{.Pod}}\t"
- }
- if namespace {
- return fmt.Sprintf("table {{.ID}}\t{{.Names}}\t%s{{.PID}}\t{{.CGROUPNS}}\t{{.IPC}}\t{{.MNT}}\t{{.NET}}\t{{.PIDNS}}\t{{.USERNS}}\t{{.UTS}}", podappend)
- }
- format = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.Created}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}\t"
- format += podappend
- if size {
- format += "{{.Size}}\t"
- }
- if infra {
- format += "{{.IsInfra}}\t"
- }
- return format
-}
-
-func psToGeneric(templParams []psTemplateParams, JSONParams []psJSONParams) (genericParams []interface{}) {
- if len(templParams) > 0 {
- for _, v := range templParams {
- genericParams = append(genericParams, interface{}(v))
- }
- return
- }
- for _, v := range JSONParams {
- genericParams = append(genericParams, interface{}(v))
- }
- return
-}
-
// generate the accurate header based on template given
func (p *psTemplateParams) headerMap() map[string]string {
v := reflect.Indirect(reflect.ValueOf(p))
@@ -503,176 +559,6 @@ func sortPsOutput(sortBy string, psOutput psSorted) (psSorted, error) {
return psOutput, nil
}
-// getTemplateOutput returns the modified container information
-func getTemplateOutput(psParams []psJSONParams, opts shared.PsOptions) ([]psTemplateParams, error) {
- var (
- psOutput []psTemplateParams
- pod, status, size string
- ns *shared.Namespace
- )
- // If the user is trying to filter based on size, or opted to sort on size
- // the size bool must be set.
- if strings.Contains(opts.Format, ".Size") || opts.Sort == "size" {
- opts.Size = true
- }
- if strings.Contains(opts.Format, ".Pod") || opts.Sort == "pod" {
- opts.Pod = true
- }
-
- for _, psParam := range psParams {
- // do we need this?
- imageName := psParam.Image
- ctrID := psParam.ID
-
- if opts.Namespace {
- ns = psParam.Namespaces
- }
- if opts.Size {
- if psParam.Size == nil {
- size = units.HumanSizeWithPrecision(0, 0)
- } else {
- size = units.HumanSizeWithPrecision(float64(psParam.Size.RwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(psParam.Size.RootFsSize), 3) + ")"
- }
- }
- if opts.Pod {
- pod = psParam.Pod
- }
-
- command := strings.Join(psParam.Command, " ")
- if !opts.NoTrunc {
- if len(command) > 20 {
- command = command[:19] + "..."
- }
- }
- ports := portsToString(psParam.Ports)
- labels := formatLabels(psParam.Labels)
-
- switch psParam.Status {
- case libpod.ContainerStateExited.String():
- fallthrough
- case libpod.ContainerStateStopped.String():
- exitedSince := units.HumanDuration(time.Since(psParam.ExitedAt))
- status = fmt.Sprintf("Exited (%d) %s ago", psParam.ExitCode, exitedSince)
- case libpod.ContainerStateRunning.String():
- status = "Up " + units.HumanDuration(time.Since(psParam.StartedAt)) + " ago"
- case libpod.ContainerStatePaused.String():
- status = "Paused"
- case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String():
- status = "Created"
- default:
- status = "Error"
- }
-
- if !opts.NoTrunc {
- ctrID = shortID(psParam.ID)
- pod = shortID(psParam.Pod)
- }
- params := psTemplateParams{
- ID: ctrID,
- Image: imageName,
- Command: command,
- CreatedAtTime: psParam.CreatedAt,
- Created: units.HumanDuration(time.Since(psParam.CreatedAt)) + " ago",
- Status: status,
- Ports: ports,
- Size: size,
- Names: psParam.Names,
- Labels: labels,
- Mounts: getMounts(psParam.Mounts, opts.NoTrunc),
- PID: psParam.PID,
- Pod: pod,
- IsInfra: psParam.IsInfra,
- }
-
- if opts.Namespace {
- params.CGROUPNS = ns.Cgroup
- params.IPC = ns.IPC
- params.MNT = ns.MNT
- params.NET = ns.NET
- params.PIDNS = ns.PIDNS
- params.USERNS = ns.User
- params.UTS = ns.UTS
- }
- psOutput = append(psOutput, params)
- }
-
- return psOutput, nil
-}
-
-// getAndSortJSONOutput returns the container info in its raw, sorted form
-func getAndSortJSONParams(containers []*libpod.Container, opts shared.PsOptions) ([]psJSONParams, error) {
- var (
- psOutput psSorted
- ns *shared.Namespace
- )
- for _, ctr := range containers {
- batchInfo, err := shared.BatchContainerOp(ctr, opts)
- if err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
- logrus.Warn(err)
- continue
- }
- return nil, err
- }
-
- if opts.Namespace {
- ns = shared.GetNamespaces(batchInfo.Pid)
- }
- params := psJSONParams{
- ID: ctr.ID(),
- Image: batchInfo.ConConfig.RootfsImageName,
- ImageID: batchInfo.ConConfig.RootfsImageID,
- Command: batchInfo.ConConfig.Spec.Process.Args,
- ExitCode: batchInfo.ExitCode,
- Exited: batchInfo.Exited,
- CreatedAt: batchInfo.ConConfig.CreatedTime,
- StartedAt: batchInfo.StartedTime,
- ExitedAt: batchInfo.ExitedTime,
- Status: batchInfo.ConState.String(),
- PID: batchInfo.Pid,
- Ports: batchInfo.ConConfig.PortMappings,
- Size: batchInfo.Size,
- Names: batchInfo.ConConfig.Name,
- Labels: batchInfo.ConConfig.Labels,
- Mounts: batchInfo.ConConfig.UserVolumes,
- ContainerRunning: batchInfo.ConState == libpod.ContainerStateRunning,
- Namespaces: ns,
- Pod: ctr.PodID(),
- IsInfra: ctr.IsInfra(),
- }
-
- psOutput = append(psOutput, params)
- }
- return sortPsOutput(opts.Sort, psOutput)
-}
-
-func generatePsOutput(containers []*libpod.Container, opts shared.PsOptions) error {
- if len(containers) == 0 && opts.Format != formats.JSONString {
- return nil
- }
- psOutput, err := getAndSortJSONParams(containers, opts)
- if err != nil {
- return err
- }
- var out formats.Writer
-
- switch opts.Format {
- case formats.JSONString:
- if err != nil {
- return errors.Wrapf(err, "unable to create JSON for output")
- }
- out = formats.JSONStructArray{Output: psToGeneric([]psTemplateParams{}, psOutput)}
- default:
- psOutput, err := getTemplateOutput(psOutput, opts)
- if err != nil {
- return errors.Wrapf(err, "unable to create output")
- }
- out = formats.StdoutTemplateArray{Output: psToGeneric(psOutput, []psJSONParams{}), Template: opts.Format, Fields: psOutput[0].headerMap()}
- }
-
- return formats.Writer(out).Out()
-}
-
// getLabels converts the labels to a string of the form "key=value, key2=value2"
func formatLabels(labels map[string]string) string {
var arr []string
@@ -723,3 +609,28 @@ func portsToString(ports []ocicni.PortMapping) string {
}
return strings.Join(portDisplay, ", ")
}
+
+func printFormat(format string, containers []shared.PsContainerOutput) error {
+ out := template.New("output")
+ out, err := out.Parse(format + "\n")
+
+ if err != nil {
+ return err
+ }
+ for _, container := range containers {
+ if err := out.Execute(os.Stdout, container); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+func dumpJSON(containers []shared.PsContainerOutput) error {
+ b, err := json.MarshalIndent(containers, "", "\t")
+ if err != nil {
+ return err
+ }
+ os.Stdout.Write(b)
+ return nil
+}
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 623c4936e..067a2b5d4 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -6,6 +6,7 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -22,6 +23,14 @@ var (
Name: "keep, k",
Usage: "keep all temporary checkpoint files",
},
+ // restore --all would make more sense if there would be
+ // dedicated state for container which are checkpointed.
+ // TODO: add ContainerStateCheckpointed
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "restore all checkpointed containers",
+ },
+ LatestFlag,
}
restoreCommand = cli.Command{
Name: "restore",
@@ -45,21 +54,14 @@ func restoreCmd(c *cli.Context) error {
defer runtime.Shutdown(false)
keep := c.Bool("keep")
- args := c.Args()
- if len(args) < 1 {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- var lastError error
- for _, arg := range args {
- ctr, err := runtime.LookupContainer(arg)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "error looking up container %q", arg)
- continue
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "checkpointed")
+
+ for _, ctr := range containers {
if err = ctr.Restore(context.TODO(), keep); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 38b1546ff..c6641e879 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
- "os"
rt "runtime"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -63,37 +62,11 @@ func rmCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- args := c.Args()
- if c.Bool("latest") && c.Bool("all") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
-
- if len(args) == 0 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("specify one or more containers to remove")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- if c.Bool("all") {
- delContainers, err = runtime.GetContainers()
- if err != nil {
- return errors.Wrapf(err, "unable to get container list")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get latest container")
- }
- delContainers = append(delContainers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- delContainers = append(delContainers, container)
- }
- }
+ delContainers, lastError = getAllOrLatestContainers(c, runtime, -1, "all")
for _, container := range delContainers {
f := func() error {
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index f44d0f7c9..4af737e0a 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -2,11 +2,15 @@ package shared
import (
"encoding/json"
+ "fmt"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-units"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
+ "sync"
"time"
"github.com/containers/libpod/libpod"
@@ -17,6 +21,11 @@ import (
"github.com/sirupsen/logrus"
)
+const (
+ cidTruncLength = 12
+ podTruncLength = 12
+)
+
// PsOptions describes the struct being formed for ps
type PsOptions struct {
All bool
@@ -45,6 +54,35 @@ type BatchContainerStruct struct {
Size *ContainerSize
}
+// PsContainerOutput is the struct being returned from a parallel
+// Batch operation
+type PsContainerOutput struct {
+ ID string
+ Image string
+ Command string
+ Created string
+ Ports string
+ Names string
+ IsInfra bool
+ Status string
+ State libpod.ContainerStatus
+ Pid int
+ Size *ContainerSize
+ Pod string
+ CreatedAt time.Time
+ ExitedAt time.Time
+ StartedAt time.Time
+ Labels map[string]string
+ PID string
+ Cgroup string
+ IPC string
+ MNT string
+ NET string
+ PIDNS string
+ User string
+ UTS string
+}
+
// Namespace describes output for ps namespace
type Namespace struct {
PID string `json:"pid,omitempty"`
@@ -64,6 +102,212 @@ type ContainerSize struct {
RwSize int64 `json:"rwSize"`
}
+// NewBatchContainer collects all display information for one container
+// under a single batch lock and returns it as a PsContainerOutput.  It is
+// intended to be called only from PBatch, which fans these calls out
+// across a worker pool.
+func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
+	var (
+		conState  libpod.ContainerStatus
+		command   string
+		created   string
+		status    string
+		exitedAt  time.Time
+		startedAt time.Time
+		exitCode  int32
+		err       error
+		pid       int
+		size      *ContainerSize
+		ns        *Namespace
+		pso       PsContainerOutput
+	)
+	batchErr := ctr.Batch(func(c *libpod.Container) error {
+		conState, err = c.State()
+		if err != nil {
+			return errors.Wrapf(err, "unable to obtain container state")
+		}
+		command = strings.Join(c.Command(), " ")
+		created = units.HumanDuration(time.Since(c.CreatedTime())) + " ago"
+
+		exitCode, _, err = c.ExitCode()
+		if err != nil {
+			return errors.Wrapf(err, "unable to obtain container exit code")
+		}
+		// Start/finish times are non-fatal: log and fall back to the
+		// zero time so a single bad container does not abort the batch.
+		startedAt, err = c.StartedTime()
+		if err != nil {
+			logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
+		}
+		exitedAt, err = c.FinishedTime()
+		if err != nil {
+			logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
+		}
+		if opts.Namespace {
+			pid, err = c.PID()
+			if err != nil {
+				return errors.Wrapf(err, "unable to obtain container pid")
+			}
+			ns = GetNamespaces(pid)
+		}
+		if opts.Size {
+			size = new(ContainerSize)
+
+			rootFsSize, err := c.RootFsSize()
+			if err != nil {
+				logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
+			}
+
+			rwSize, err := c.RWSize()
+			if err != nil {
+				logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
+			}
+
+			size.RootFsSize = rootFsSize
+			size.RwSize = rwSize
+		}
+
+		return nil
+	})
+
+	if batchErr != nil {
+		return pso, batchErr
+	}
+
+	// Render a human-readable status string from the container state.
+	switch conState.String() {
+	case libpod.ContainerStateExited.String():
+		fallthrough
+	case libpod.ContainerStateStopped.String():
+		exitedSince := units.HumanDuration(time.Since(exitedAt))
+		status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince)
+	case libpod.ContainerStateRunning.String():
+		status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago"
+	case libpod.ContainerStatePaused.String():
+		status = "Paused"
+	case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String():
+		status = "Created"
+	default:
+		status = "Error"
+	}
+
+	_, imageName := ctr.Image()
+	cid := ctr.ID()
+	pod := ctr.PodID()
+	if !opts.NoTrunc {
+		// Guard both slice bounds so unusually short IDs cannot panic,
+		// and use the named constants rather than a magic 12.
+		if len(cid) > cidTruncLength {
+			cid = cid[0:cidTruncLength]
+		}
+		if len(pod) > podTruncLength {
+			pod = pod[0:podTruncLength]
+		}
+	}
+
+	pso.ID = cid
+	pso.Image = imageName
+	pso.Command = command
+	pso.Created = created
+	pso.Ports = portsToString(ctr.PortMappings())
+	pso.Names = ctr.Name()
+	pso.IsInfra = ctr.IsInfra()
+	pso.Status = status
+	pso.State = conState
+	pso.Pid = pid
+	pso.Size = size
+	pso.Pod = pod
+	pso.ExitedAt = exitedAt
+	pso.CreatedAt = ctr.CreatedTime()
+	pso.StartedAt = startedAt
+	pso.Labels = ctr.Labels()
+
+	if opts.Namespace {
+		pso.Cgroup = ns.Cgroup
+		pso.IPC = ns.IPC
+		pso.MNT = ns.MNT
+		pso.NET = ns.NET
+		pso.User = ns.User
+		pso.UTS = ns.UTS
+		pso.PIDNS = ns.PIDNS
+	}
+
+	return pso, nil
+}
+
+// pFunc is the signature of a deferred batch operation on one container.
+type pFunc func() (PsContainerOutput, error)
+
+// workerInput is one unit of work queued to the PBatch worker pool.
+type workerInput struct {
+	parallelFunc pFunc     // the batch operation to execute
+	opts         PsOptions // display options the operation was built with
+	cid          string    // container ID, for identifying the job
+	job          int       // sequence number of the job in the batch
+}
+
+// worker is a "threaded" worker that takes jobs from the channel "jobs",
+// runs each batch function, and sends the result to the results channel
+// or, on failure, just the error to errChan.
+func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errChan chan<- error) {
+	for j := range jobs {
+		r, err := j.parallelFunc()
+		// If we find an error, we return just the error.  The parameter
+		// is named errChan (not "errors") to avoid shadowing the errors
+		// package used elsewhere in this file.
+		if err != nil {
+			errChan <- err
+		} else {
+			// Return the result
+			results <- r
+		}
+		wg.Done()
+	}
+}
+
+// PBatch performs batch operations on containers in parallel.  It spawns
+// a number of workers relative to the number of parallel operations
+// desired (capped at the number of containers), gathers one
+// PsContainerOutput per container, and logs (rather than returns)
+// per-container errors.
+func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
+	var (
+		wg        sync.WaitGroup
+		psResults []PsContainerOutput
+	)
+
+	// If the number of containers in question is less than the number of
+	// proposed parallel operations, we shouldn't spawn so many workers.
+	if workers > len(containers) {
+		workers = len(containers)
+	}
+	// Guard against a non-positive worker count: with jobs queued but no
+	// workers to drain them, wg.Wait below would deadlock.
+	if workers < 1 && len(containers) > 0 {
+		workers = 1
+	}
+
+	jobs := make(chan workerInput, len(containers))
+	results := make(chan PsContainerOutput, len(containers))
+	batchErrors := make(chan error, len(containers))
+
+	// Create the workers
+	for w := 1; w <= workers; w++ {
+		go worker(&wg, jobs, results, batchErrors)
+	}
+
+	// Add jobs to the workers
+	for i, j := range containers {
+		j := j // capture the loop variable for the closure
+		wg.Add(1)
+		f := func() (PsContainerOutput, error) {
+			return NewBatchContainer(j, opts)
+		}
+		jobs <- workerInput{
+			parallelFunc: f,
+			opts:         opts,
+			cid:          j.ID(),
+			job:          i,
+		}
+	}
+	close(jobs)
+	wg.Wait()
+	close(results)
+	close(batchErrors)
+	for err := range batchErrors {
+		logrus.Errorf("unable to get container info: %q", err)
+	}
+	for res := range results {
+		// We sort out running vs non-running here to save lots of copying
+		// later.
+		if !opts.All && !opts.Latest && opts.Last < 1 {
+			if !res.IsInfra && res.State == libpod.ContainerStateRunning {
+				psResults = append(psResults, res)
+			}
+		} else {
+			psResults = append(psResults, res)
+		}
+	}
+	return psResults
+}
+
// BatchContainer is used in ps to reduce performance hits by "batching"
// locks.
func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
@@ -325,3 +569,19 @@ func getCgroup(spec *specs.Spec) string {
}
return cgroup
}
+
+// portsToString converts the ports used to a string of the from "port1, port2"
+func portsToString(ports []ocicni.PortMapping) string {
+ var portDisplay []string
+ if len(ports) == 0 {
+ return ""
+ }
+ for _, v := range ports {
+ hostIP := v.HostIP
+ if hostIP == "" {
+ hostIP = "0.0.0.0"
+ }
+ portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol))
+ }
+ return strings.Join(portDisplay, ", ")
+}
diff --git a/cmd/podman/stats.go b/cmd/podman/stats.go
index dea351e88..f6beac1a8 100644
--- a/cmd/podman/stats.go
+++ b/cmd/podman/stats.go
@@ -84,8 +84,7 @@ func statsCmd(c *cli.Context) error {
if ctr > 1 {
return errors.Errorf("--all, --latest and containers cannot be used together")
} else if ctr == 0 {
- // If user didn't specify, imply --all
- all = true
+ return errors.Errorf("you must specify --all, --latest, or at least one container")
}
runtime, err := libpodruntime.GetRuntime(c)
@@ -126,6 +125,10 @@ func statsCmd(c *cli.Context) error {
for _, ctr := range ctrs {
initialStats, err := ctr.GetContainerStats(&libpod.ContainerStats{})
if err != nil {
+ // when doing "all", don't worry about containers that are not running
+ if c.Bool("all") && (errors.Cause(err) == libpod.ErrCtrRemoved || errors.Cause(err) == libpod.ErrNoSuchCtr || errors.Cause(err) == libpod.ErrCtrStateInvalid) {
+ continue
+ }
return err
}
containerStats[ctr.ID()] = initialStats
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index ff0b36bf1..edadbda89 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
- "os"
rt "runtime"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -44,16 +43,11 @@ var (
)
func stopCmd(c *cli.Context) error {
- args := c.Args()
- if (c.Bool("all") || c.Bool("latest")) && len(args) > 0 {
- return errors.Errorf("no arguments are needed with --all or --latest")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if len(args) < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
+
if err := validateFlags(c, stopFlags); err != nil {
return err
}
@@ -65,39 +59,7 @@ func stopCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- var filterFuncs []libpod.ContainerFilter
- var containers []*libpod.Container
- var lastError error
-
- if c.Bool("all") {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- return state == libpod.ContainerStateRunning
- })
- containers, err = runtime.GetContainers(filterFuncs...)
- if err != nil {
- return errors.Wrapf(err, "unable to get running containers")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get last created container")
- }
- containers = append(containers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- containers = append(containers, container)
- }
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
var stopFuncs []workerInput
for _, ctr := range containers {
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index b193cf889..f9971fd88 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -160,15 +160,8 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
}
func checkMutuallyExclusiveFlags(c *cli.Context) error {
- argLen := len(c.Args())
- if (c.Bool("all") || c.Bool("latest")) && argLen > 0 {
- return errors.Errorf("no arguments are needed with --all or --latest")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if argLen < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one pod name or id")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
if err := validateFlags(c, startFlags); err != nil {
return err