author    baude <bbaude@redhat.com>  2018-02-22 10:08:59 -0600
committer Atomic Bot <atomic-devel@projectatomic.io>  2018-02-23 17:56:51 +0000
commit    e97ca5bd38045a818e94cb11571fa4120dffcdae
tree      548a07155e1816cb605db0a558f929e8f33ffee8
parent    fd79671d05895cf209262c3262bab2fdac386984
podman ps json output use batched ops
The podman ps command for non-JSON targets can use a "batched" function that limits the number of locks needed. The collection of output for JSON should use this too.

Signed-off-by: baude <bbaude@redhat.com>
Closes: #380
Approved by: rhatdan
-rw-r--r--  cmd/podman/ps.go  258
1 file changed, 135 insertions(+), 123 deletions(-)
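As a quick illustration of the "batched" pattern the commit message describes, here is a minimal sketch that gathers container state and PID inside a single Batch callback, so the container lock is taken once rather than once per accessor. It uses only the Batch, State, and PID accessors that appear in the diff below; the import path is assumed from the projectatomic-era layout, and collectPsFields is a hypothetical helper, not code from this commit.

package main

import (
	"fmt"

	"github.com/projectatomic/libpod/libpod"
)

// collectPsFields grabs everything under one lock: inside Batch, the
// accessors reuse the lock Batch already holds instead of each locking
// the container separately.
func collectPsFields(ctr *libpod.Container) (state libpod.ContainerStatus, pid int, err error) {
	err = ctr.Batch(func(c *libpod.Container) error {
		var bErr error
		if state, bErr = c.State(); bErr != nil {
			return bErr
		}
		pid, bErr = c.PID()
		return bErr
	})
	return state, pid, err
}

func main() {
	fmt.Println("collectPsFields shows the batched-lock pattern used by podman ps")
}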
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index d5bbe6f67..b635ea392 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -64,21 +64,21 @@ type psTemplateParams struct {
// psJSONParams will be populated by data from libpod.Container,
// the members of the struct are the same data types as their sources.
type psJSONParams struct {
- ID string `json:"id"`
- Image string `json:"image"`
- ImageID string `json:"image_id"`
- Command []string `json:"command"`
- CreatedAt time.Time `json:"createdAt"`
- RunningFor time.Duration `json:"runningFor"`
- Status string `json:"status"`
- Ports map[string]struct{} `json:"ports"`
- RootFsSize int64 `json:"rootFsSize"`
- RWSize int64 `json:"rwSize"`
- Names string `json:"names"`
- Labels fields.Set `json:"labels"`
- Mounts []specs.Mount `json:"mounts"`
- ContainerRunning bool `json:"ctrRunning"`
- Namespaces *namespace `json:"namespace,omitempty"`
+ ID string `json:"id"`
+ Image string `json:"image"`
+ ImageID string `json:"image_id"`
+ Command []string `json:"command"`
+ CreatedAt time.Time `json:"createdAt"`
+ RunningFor time.Duration `json:"runningFor"`
+ Status string `json:"status"`
+ Ports []ocicni.PortMapping `json:"ports"`
+ RootFsSize int64 `json:"rootFsSize"`
+ RWSize int64 `json:"rwSize"`
+ Names string `json:"names"`
+ Labels fields.Set `json:"labels"`
+ Mounts []specs.Mount `json:"mounts"`
+ ContainerRunning bool `json:"ctrRunning"`
+ Namespaces *namespace `json:"namespace,omitempty"`
}
type namespace struct {
@@ -397,72 +397,25 @@ func (p *psTemplateParams) headerMap() map[string]string {
// getTemplateOutput returns the modified container information
func getTemplateOutput(containers []*libpod.Container, opts psOptions) ([]psTemplateParams, error) {
var (
- psOutput []psTemplateParams
- status, ctrID, size string
- conConfig *libpod.ContainerConfig
- conState libpod.ContainerStatus
- err error
- exitCode int32
- pid int
- rootFsSize, rwSize int64
- ns *namespace
- startedTime time.Time
+ psOutput []psTemplateParams
+ status, size string
+ ns *namespace
)
for _, ctr := range containers {
- batchErr := ctr.Batch(func(c *libpod.Container) error {
- ctrID = c.ID()
- conConfig = c.Config()
- conState, err = c.State()
- if err != nil {
- return errors.Wrapf(err, "unable to obtain container state")
- }
- exitCode, err = c.ExitCode()
- if err != nil {
- return errors.Wrapf(err, "unable to obtain container exit code")
- }
- startedTime, err = c.StartedTime()
- if err != nil {
- logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
- }
-
- if !opts.size && !opts.namespace {
- return nil
- }
-
- if opts.namespace {
- pid, err = c.PID()
- if err != nil {
- return errors.Wrapf(err, "unable to obtain container pid")
- }
- }
- if opts.size {
- rootFsSize, err = c.RootFsSize()
- if err != nil {
- logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
- }
-
- rwSize, err = c.RWSize()
- if err != nil {
- logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
- }
-
- size = units.HumanSizeWithPrecision(float64(rwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(rootFsSize), 3) + ")"
- }
-
- return nil
- })
- if batchErr != nil {
+ batchInfo, err := batchContainerOp(ctr, opts)
+ if err != nil {
return nil, err
}
+ ctrID := ctr.ID()
runningFor := ""
// If the container has not been started, the "zero" value of time is 0001-01-01 00:00:00 +0000 UTC,
// which would make the elapsed time span hundreds of years, so we check for the "zero" value of time.Time
- if startedTime != (time.Time{}) {
- runningFor = units.HumanDuration(time.Since(startedTime))
+ if batchInfo.startedTime != (time.Time{}) {
+ runningFor = units.HumanDuration(time.Since(batchInfo.startedTime))
}
- createdAt := conConfig.CreatedTime.Format("2006-01-02 15:04:05 -0700 MST")
- imageName := conConfig.RootfsImageName
+ createdAt := batchInfo.conConfig.CreatedTime.Format("2006-01-02 15:04:05 -0700 MST")
+ imageName := batchInfo.conConfig.RootfsImageName
var createArtifact createConfig
artifact, err := ctr.GetArtifact("create-config")
@@ -474,17 +427,19 @@ func getTemplateOutput(containers []*libpod.Container, opts psOptions) ([]psTemp
logrus.Errorf("couldn't get some ps information, error getting artifact %q: %v", ctr.ID(), err)
}
if opts.namespace {
- ns = getNamespaces(pid)
+ ns = getNamespaces(batchInfo.pid)
}
- // TODO We currently dont have the ability to get many of
- // these data items. Uncomment as progress is made
- command := strings.Join(conConfig.Spec.Process.Args, " ")
- ports := getPorts(conConfig.PortMappings)
+ if opts.size {
+ size = units.HumanSizeWithPrecision(float64(batchInfo.rwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(batchInfo.rootFsSize), 3) + ")"
+ }
+
+ command := strings.Join(batchInfo.conConfig.Spec.Process.Args, " ")
+ ports := portsToString(batchInfo.conConfig.PortMappings)
mounts := getMounts(createArtifact.Volumes, opts.noTrunc)
labels := formatLabels(ctr.Labels())
- switch conState {
+ switch batchInfo.conState {
case libpod.ContainerStateStopped:
- status = fmt.Sprintf("Exited (%d) %s ago", exitCode, runningFor)
+ status = fmt.Sprintf("Exited (%d) %s ago", batchInfo.exitCode, runningFor)
case libpod.ContainerStateRunning:
@@ -499,12 +454,9 @@ func getTemplateOutput(containers []*libpod.Container, opts psOptions) ([]psTemp
if !opts.noTrunc {
ctrID = shortID(ctr.ID())
- imageName = conConfig.RootfsImageName
+ imageName = batchInfo.conConfig.RootfsImageName
}
- // TODO We currently dont have the ability to get many of
- // these data items. Uncomment as progress is made
-
params := psTemplateParams{
ID: ctrID,
Image: imageName,
@@ -517,7 +469,7 @@ func getTemplateOutput(containers []*libpod.Container, opts psOptions) ([]psTemp
Names: ctr.Name(),
Labels: labels,
Mounts: mounts,
- PID: pid,
+ PID: batchInfo.pid,
}
if opts.namespace {
params.Cgroup = ns.Cgroup
@@ -564,48 +516,34 @@ func getNamespaceInfo(path string) (string, error) {
}
// getJSONOutput returns the container info in its raw form
-func getJSONOutput(containers []*libpod.Container, nSpace bool) ([]psJSONParams, error) {
- var psOutput []psJSONParams
- var ns *namespace
+func getJSONOutput(containers []*libpod.Container, opts psOptions) ([]psJSONParams, error) {
+ var (
+ psOutput []psJSONParams
+ ns *namespace
+ )
for _, ctr := range containers {
- pid, err := ctr.PID()
- if err != nil {
- return psOutput, errors.Wrapf(err, "unable to obtain container pid")
- }
- if nSpace {
- ns = getNamespaces(pid)
- }
- cc := ctr.Config()
- conState, err := ctr.State()
+ batchInfo, err := batchContainerOp(ctr, opts)
if err != nil {
- return psOutput, errors.Wrapf(err, "unable to obtain container state for JSON output")
- }
- rootFsSize, err := ctr.RootFsSize()
- if err != nil {
- logrus.Errorf("error getting root fs size for %q: %v", ctr.ID(), err)
+ return nil, err
}
- rwSize, err := ctr.RWSize()
- if err != nil {
- logrus.Errorf("error getting rw size for %q: %v", ctr.ID(), err)
+ if opts.namespace {
+ ns = getNamespaces(batchInfo.pid)
}
-
params := psJSONParams{
- // TODO When we have ability to obtain the commented out data, we need
- // TODO to add it
- ID: ctr.ID(),
- Image: cc.RootfsImageName,
- ImageID: cc.RootfsImageID,
- Command: ctr.Spec().Process.Args,
- CreatedAt: cc.CreatedTime,
- RunningFor: time.Since(cc.CreatedTime),
- Status: conState.String(),
- //Ports: cc.Spec.Linux.Resources.Network.
- RootFsSize: rootFsSize,
- RWSize: rwSize,
- Names: cc.Name,
- Labels: cc.Labels,
- Mounts: cc.Spec.Mounts,
- ContainerRunning: conState == libpod.ContainerStateRunning,
+ ID: ctr.ID(),
+ Image: batchInfo.conConfig.RootfsImageName,
+ ImageID: batchInfo.conConfig.RootfsImageID,
+ Command: batchInfo.conConfig.Spec.Process.Args,
+ CreatedAt: batchInfo.conConfig.CreatedTime,
+ RunningFor: time.Since(batchInfo.conConfig.CreatedTime),
+ Status: batchInfo.conState.String(),
+ Ports: batchInfo.conConfig.PortMappings,
+ RootFsSize: batchInfo.rootFsSize,
+ RWSize: batchInfo.rwSize,
+ Names: batchInfo.conConfig.Name,
+ Labels: batchInfo.conConfig.Labels,
+ Mounts: batchInfo.conConfig.Spec.Mounts,
+ ContainerRunning: batchInfo.conState == libpod.ContainerStateRunning,
Namespaces: ns,
}
psOutput = append(psOutput, params)
@@ -621,7 +559,7 @@ func generatePsOutput(containers []*libpod.Container, opts psOptions) error {
switch opts.format {
case formats.JSONString:
- psOutput, err := getJSONOutput(containers, opts.namespace)
+ psOutput, err := getJSONOutput(containers, opts)
if err != nil {
return errors.Wrapf(err, "unable to create JSON for output")
}
@@ -678,8 +616,8 @@ func getMounts(mounts []string, noTrunc bool) string {
return strings.Join(arr, ",")
}
-// getPorts converts the ports used to a string of the from "port1, port2"
-func getPorts(ports []ocicni.PortMapping) string {
+// portsToString converts the ports used to a string of the form "port1, port2"
+func portsToString(ports []ocicni.PortMapping) string {
var portDisplay []string
if len(ports) == 0 {
return ""
@@ -693,3 +631,77 @@ func getPorts(ports []ocicni.PortMapping) string {
}
return strings.Join(portDisplay, ", ")
}
+
+type batchContainerStruct struct {
+ conConfig *libpod.ContainerConfig
+ conState libpod.ContainerStatus
+ exitCode int32
+ pid int
+ rootFsSize, rwSize int64
+ startedTime time.Time
+}
+
+func batchContainerOp(ctr *libpod.Container, opts psOptions) (batchContainerStruct, error) {
+ var (
+ conConfig *libpod.ContainerConfig
+ conState libpod.ContainerStatus
+ err error
+ exitCode int32
+ pid int
+ rootFsSize, rwSize int64
+ startedTime time.Time
+ )
+
+ batchErr := ctr.Batch(func(c *libpod.Container) error {
+ conConfig = c.Config()
+ conState, err = c.State()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container state")
+ }
+
+ exitCode, err = c.ExitCode()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container exit code")
+ }
+ startedTime, err = c.StartedTime()
+ if err != nil {
+ logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
+ }
+
+ if !opts.size && !opts.namespace {
+ return nil
+ }
+
+ if opts.namespace {
+ pid, err = c.PID()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container pid")
+ }
+ }
+ if opts.size {
+ rootFsSize, err = c.RootFsSize()
+ if err != nil {
+ logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
+ }
+
+ rwSize, err = c.RWSize()
+ if err != nil {
+ logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
+ }
+
+ }
+ return nil
+ })
+ if batchErr != nil {
+ return batchContainerStruct{}, batchErr
+ }
+ return batchContainerStruct{
+ conConfig: conConfig,
+ conState: conState,
+ exitCode: exitCode,
+ pid: pid,
+ rootFsSize: rootFsSize,
+ rwSize: rwSize,
+ startedTime: startedTime,
+ }, nil
+}