summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorOpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>2019-03-26 04:51:06 -0700
committerGitHub <noreply@github.com>2019-03-26 04:51:06 -0700
commit9e556530d24a51c7d19679a2f658a367686d9874 (patch)
tree7f5881c70c53aa47685d2a663f75ed9b430a72e3
parentd0c6a35c05a72e4b156819406b83aa9f34f840e0 (diff)
parent25e0f870694ff65ac4e2973072410dcfc45dd7db (diff)
downloadpodman-9e556530d24a51c7d19679a2f658a367686d9874.tar.gz
podman-9e556530d24a51c7d19679a2f658a367686d9874.tar.bz2
podman-9e556530d24a51c7d19679a2f658a367686d9874.zip
Merge pull request #2498 from QiWang19/sysdf
podman system df- show podman disk usage
-rw-r--r--cmd/podman/cliconfig/config.go6
-rw-r--r--cmd/podman/commands.go1
-rw-r--r--cmd/podman/system_df.go639
-rw-r--r--completions/bash/podman19
-rw-r--r--docs/podman-system-df.1.md57
-rw-r--r--docs/podman-system.1.md3
-rw-r--r--test/e2e/system_df_test.go62
-rw-r--r--transfer.md1
8 files changed, 787 insertions, 1 deletions
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 1461c9f03..884bd7fdb 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -572,3 +572,9 @@ type SystemPruneValues struct {
type SystemRenumberValues struct {
PodmanCommand
}
+
+// SystemDfValues holds the CLI flag values for `podman system df`.
+type SystemDfValues struct {
+	PodmanCommand
+	// Verbose selects the detailed per-object tables (-v/--verbose).
+	Verbose bool
+	// Format is an optional Go template for the summary table (--format).
+	Format string
+}
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index 810c5a6f6..875b2aec8 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -108,6 +108,7 @@ func getSystemSubCommands() []*cobra.Command {
return []*cobra.Command{
_pruneSystemCommand,
_renumberCommand,
+ _dfSystemCommand,
}
}
diff --git a/cmd/podman/system_df.go b/cmd/podman/system_df.go
new file mode 100644
index 000000000..183c5a7dd
--- /dev/null
+++ b/cmd/podman/system_df.go
@@ -0,0 +1,639 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/pkg/formats"
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ units "github.com/docker/go-units"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var (
+	// dfSystemCommand holds the parsed flag values for `podman system df`.
+	dfSystemCommand cliconfig.SystemDfValues
+	dfSystemDescription = `
+	podman system df
+
+	Show podman disk usage
+	`
+	// _dfSystemCommand is the cobra definition of the `df` subcommand; it
+	// copies the global options into dfSystemCommand before dispatching.
+	_dfSystemCommand = &cobra.Command{
+		Use:   "df",
+		Short: "Show podman disk usage",
+		Long:  dfSystemDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			dfSystemCommand.GlobalFlags = MainGlobalOpts
+			return dfSystemCmd(&dfSystemCommand)
+		},
+	}
+)
+
+// dfMetaData bundles the image, container, and volume inventories plus the
+// cross-reference maps shared by the summary and verbose output paths.
+type dfMetaData struct {
+	images     []*image.Image
+	containers []*libpod.Container
+	// activeContainers maps container ID -> container for running/paused
+	// containers only.
+	activeContainers map[string]*libpod.Container
+	// imagesUsedbyCtrMap maps image ID -> containers created from it.
+	imagesUsedbyCtrMap map[string][]*libpod.Container
+	// imagesUsedbyActiveCtr maps image ID -> active containers created
+	// from it.
+	imagesUsedbyActiveCtr map[string][]*libpod.Container
+	volumes               []*libpod.Volume
+	// volumeUsedByContainerMap maps volume name -> containers mounting it.
+	volumeUsedByContainerMap map[string][]*libpod.Container
+}
+
+// systemDfDiskUsage is one row of the `podman system df` summary table.
+type systemDfDiskUsage struct {
+	Type        string
+	Total       int
+	Active      int
+	Size        string
+	Reclaimable string
+}
+
+// imageVerboseDiskUsage is one row of the verbose images table.
+type imageVerboseDiskUsage struct {
+	Repository string
+	Tag        string
+	ImageID    string
+	Created    string
+	Size       string
+	SharedSize string
+	UniqueSize string
+	Containers int
+}
+
+// containerVerboseDiskUsage is one row of the verbose containers table.
+type containerVerboseDiskUsage struct {
+	ContainerID  string
+	Image        string
+	Command      string
+	LocalVolumes int
+	Size         string
+	Created      string
+	Status       string
+	Names        string
+}
+
+// volumeVerboseDiskUsage is one row of the verbose volumes table.
+type volumeVerboseDiskUsage struct {
+	VolumeName string
+	Links      int
+	Size       string
+}
+
+// systemDfDefaultFormat is the Go template used for the summary table when
+// the user does not pass --format.
+const systemDfDefaultFormat string = "table {{.Type}}\t{{.Total}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
+
+// init wires the df subcommand into the CLI and registers its two flags.
+func init() {
+	dfSystemCommand.Command = _dfSystemCommand
+	dfSystemCommand.SetUsageTemplate(UsageTemplate())
+	flags := dfSystemCommand.Flags()
+	flags.BoolVarP(&dfSystemCommand.Verbose, "verbose", "v", false, "Show detailed information on space usage")
+	flags.StringVar(&dfSystemCommand.Format, "format", "", "Pretty-print images using a Go template")
+}
+
+// dfSystemCmd is the entry point for `podman system df`. It collects the
+// disk-usage metadata once, then renders either the verbose per-object
+// tables (-v) or the one-row-per-type summary, honoring --format for the
+// latter.
+func dfSystemCmd(c *cliconfig.SystemDfValues) error {
+	runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+	if err != nil {
+		// Error strings are lowercase per Go convention.
+		return errors.Wrapf(err, "could not get runtime")
+	}
+	defer runtime.Shutdown(false)
+
+	ctx := getContext()
+
+	metaData, err := getDfMetaData(ctx, runtime)
+	if err != nil {
+		return errors.Wrapf(err, "error getting disk usage data")
+	}
+
+	if c.Verbose {
+		return verboseOutput(ctx, metaData)
+	}
+
+	systemDfDiskUsages, err := getDiskUsage(ctx, runtime, metaData)
+	if err != nil {
+		return errors.Wrapf(err, "error getting output of system df")
+	}
+	format := systemDfDefaultFormat
+	if c.Format != "" {
+		// The shell delivers a literal backslash-t; convert it to a real
+		// tab so the template produces aligned columns.
+		format = strings.Replace(c.Format, `\t`, "\t", -1)
+	}
+	generateSysDfOutput(systemDfDiskUsages, format)
+	return nil
+}
+
+func generateSysDfOutput(systemDfDiskUsages []systemDfDiskUsage, format string) {
+ var systemDfHeader = map[string]string{
+ "Type": "TYPE",
+ "Total": "TOTAL",
+ "Active": "ACTIVE",
+ "Size": "SIZE",
+ "Reclaimable": "RECLAIMABLE",
+ }
+ out := formats.StdoutTemplateArray{Output: systemDfDiskUsageToGeneric(systemDfDiskUsages), Template: format, Fields: systemDfHeader}
+ formats.Writer(out).Out()
+}
+
+// getDiskUsage computes the three summary rows (images, containers, local
+// volumes) from the precollected metadata.
+func getDiskUsage(ctx context.Context, runtime *libpod.Runtime, metaData dfMetaData) ([]systemDfDiskUsage, error) {
+	imageDiskUsage, err := getImageDiskUsage(ctx, metaData.images, metaData.imagesUsedbyCtrMap, metaData.imagesUsedbyActiveCtr)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting disk usage of images")
+	}
+	containerDiskUsage, err := getContainerDiskUsage(metaData.containers, metaData.activeContainers)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting disk usage of containers")
+	}
+	// Fixed typo: "volumess" -> "volumes".
+	volumeDiskUsage, err := getVolumeDiskUsage(metaData.volumes, metaData.volumeUsedByContainerMap)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting disk usage of volumes")
+	}
+
+	systemDfDiskUsages := []systemDfDiskUsage{imageDiskUsage, containerDiskUsage, volumeDiskUsage}
+	return systemDfDiskUsages, nil
+}
+
+// getDfMetaData queries the runtime for all images, containers, and volumes
+// and precomputes the usage maps shared by the summary and verbose paths.
+func getDfMetaData(ctx context.Context, runtime *libpod.Runtime) (dfMetaData, error) {
+	var metaData dfMetaData
+	images, err := runtime.ImageRuntime().GetImages()
+	if err != nil {
+		return metaData, errors.Wrapf(err, "unable to get images")
+	}
+	containers, err := runtime.GetAllContainers()
+	if err != nil {
+		return metaData, errors.Wrapf(err, "error getting all containers")
+	}
+	volumes, err := runtime.GetAllVolumes()
+	if err != nil {
+		return metaData, errors.Wrap(err, "error getting all volumes")
+	}
+	activeContainers, err := activeContainers(containers)
+	if err != nil {
+		return metaData, errors.Wrapf(err, "error getting active containers")
+	}
+	imagesUsedbyCtrMap, imagesUsedbyActiveCtr, err := imagesUsedbyCtr(containers, activeContainers)
+	if err != nil {
+		// Fixed duplicated word: "getting getting".
+		return metaData, errors.Wrapf(err, "error getting images used by containers")
+	}
+	metaData = dfMetaData{
+		images:                   images,
+		containers:               containers,
+		activeContainers:         activeContainers,
+		imagesUsedbyCtrMap:       imagesUsedbyCtrMap,
+		imagesUsedbyActiveCtr:    imagesUsedbyActiveCtr,
+		volumes:                  volumes,
+		volumeUsedByContainerMap: volumeUsedByContainer(containers),
+	}
+	return metaData, nil
+}
+
+// imageUniqueSize returns, for every image, the bytes not shared with its
+// topmost ancestor: the image's total size minus the root parent's size.
+// Images with no parent map to their full size.
+func imageUniqueSize(ctx context.Context, images []*image.Image) (map[string]uint64, error) {
+	imgUniqueSizeMap := make(map[string]uint64)
+	for _, img := range images {
+		// Walk up the parent chain to find the root ancestor.
+		parentImg := img
+		for {
+			next, err := parentImg.GetParent()
+			if err != nil {
+				return nil, errors.Wrapf(err, "error getting parent of image %s", parentImg.ID())
+			}
+			if next == nil {
+				break
+			}
+			parentImg = next
+		}
+		imgSize, err := img.Size(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if img.ID() == parentImg.ID() {
+			// No ancestor: nothing is shared, the whole image is unique.
+			imgUniqueSizeMap[img.ID()] = *imgSize
+		} else {
+			// NOTE(review): this subtracts only the root ancestor's size,
+			// not intermediate parents' — confirm that matches the
+			// intended "unique" definition.
+			parentImgSize, err := parentImg.Size(ctx)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error getting size of parent image %s", parentImg.ID())
+			}
+			imgUniqueSizeMap[img.ID()] = *imgSize - *parentImgSize
+		}
+	}
+	return imgUniqueSizeMap, nil
+}
+
+// getImageDiskUsage builds the "Images" summary row: totals only count leaf
+// images (intermediate parents are skipped), and space is reclaimable unless
+// a container still depends on the image (see imageUsedSize).
+func getImageDiskUsage(ctx context.Context, images []*image.Image, imageUsedbyCintainerMap map[string][]*libpod.Container, imageUsedbyActiveContainerMap map[string][]*libpod.Container) (systemDfDiskUsage, error) {
+	var (
+		numberOfImages       int
+		sumSize              uint64
+		numberOfActiveImages int
+		unreclaimableSize    uint64
+		imageDiskUsage       systemDfDiskUsage
+		reclaimableStr       string
+	)
+
+	imgUniqueSizeMap, err := imageUniqueSize(ctx, images)
+	if err != nil {
+		return imageDiskUsage, errors.Wrapf(err, "error getting unique size of images")
+	}
+
+	for _, img := range images {
+
+		// Accumulate the non-reclaimable portion for every image,
+		// including intermediates.
+		unreclaimableSize += imageUsedSize(img, imgUniqueSizeMap, imageUsedbyCintainerMap, imageUsedbyActiveContainerMap)
+
+		isParent, err := img.IsParent()
+		if err != nil {
+			return imageDiskUsage, err
+		}
+		parent, err := img.GetParent()
+		if err != nil {
+			return imageDiskUsage, errors.Wrapf(err, "error getting parent of image %s", img.ID())
+		}
+		// Skip intermediate images (a parent that itself has a parent)
+		// when counting.
+		if isParent && parent != nil {
+			continue
+		}
+		numberOfImages++
+		// NOTE(review): "active" here means used by ANY container, not
+		// only running ones — confirm this is intended.
+		if _, isActive := imageUsedbyCintainerMap[img.ID()]; isActive {
+			numberOfActiveImages++
+		}
+
+		if !isParent {
+			size, err := img.Size(ctx)
+			if err != nil {
+				return imageDiskUsage, errors.Wrapf(err, "error getting disk usage of image %s", img.ID())
+			}
+			sumSize += *size
+		}
+
+	}
+	sumSizeStr := units.HumanSizeWithPrecision(float64(sumSize), 3)
+	reclaimable := sumSize - unreclaimableSize
+	// Guard against dividing by zero when there are no image bytes at all.
+	if sumSize != 0 {
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(float64(reclaimable), 3), 100*reclaimable/sumSize)
+	} else {
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(float64(reclaimable), 3), 0)
+	}
+	imageDiskUsage = systemDfDiskUsage{
+		Type:        "Images",
+		Total:       numberOfImages,
+		Active:      numberOfActiveImages,
+		Size:        sumSizeStr,
+		Reclaimable: reclaimableStr,
+	}
+	return imageDiskUsage, nil
+}
+
+func imageUsedSize(img *image.Image, imgUniqueSizeMap map[string]uint64, imageUsedbyCintainerMap map[string][]*libpod.Container, imageUsedbyActiveContainerMap map[string][]*libpod.Container) uint64 {
+ var usedSize uint64
+ imgUnique := imgUniqueSizeMap[img.ID()]
+ if _, isCtrActive := imageUsedbyActiveContainerMap[img.ID()]; isCtrActive {
+ return imgUnique
+ }
+ containers := imageUsedbyCintainerMap[img.ID()]
+ for _, ctr := range containers {
+ if len(ctr.UserVolumes()) > 0 {
+ usedSize += imgUnique
+ return usedSize
+ }
+ }
+ return usedSize
+}
+
+// imagesUsedbyCtr maps each image ID to the containers created from it and,
+// separately, to the active containers created from it.
+func imagesUsedbyCtr(containers []*libpod.Container, activeContainers map[string]*libpod.Container) (map[string][]*libpod.Container, map[string][]*libpod.Container, error) {
+	byImage := make(map[string][]*libpod.Container)
+	activeByImage := make(map[string][]*libpod.Container)
+	for _, ctr := range containers {
+		// The image ID of an existing container is always resolvable, so
+		// the error is deliberately ignored here.
+		imgID, _ := ctr.Image()
+		byImage[imgID] = append(byImage[imgID], ctr)
+		if _, ok := activeContainers[ctr.ID()]; ok {
+			activeByImage[imgID] = append(activeByImage[imgID], ctr)
+		}
+	}
+	return byImage, activeByImage, nil
+}
+
+// getContainerDiskUsage builds the "Containers" summary row. Read/write
+// layer space of running or paused containers counts as non-reclaimable.
+func getContainerDiskUsage(containers []*libpod.Container, activeContainers map[string]*libpod.Container) (systemDfDiskUsage, error) {
+	var total, inUse int64
+	for _, ctr := range containers {
+		size, err := ctr.RWSize()
+		if err != nil {
+			return systemDfDiskUsage{}, errors.Wrapf(err, "error getting size of container %s", ctr.ID())
+		}
+		total += size
+	}
+	for _, activeCtr := range activeContainers {
+		size, err := activeCtr.RWSize()
+		if err != nil {
+			return systemDfDiskUsage{}, errors.Wrapf(err, "error getting size of active container %s", activeCtr.ID())
+		}
+		inUse += size
+	}
+	// Guard the percentage computation against a zero total.
+	var reclaimableStr string
+	if total == 0 {
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(0, 3), 0)
+	} else {
+		reclaimable := total - inUse
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(float64(reclaimable), 3), 100*reclaimable/total)
+	}
+	return systemDfDiskUsage{
+		Type:        "Containers",
+		Total:       len(containers),
+		Active:      len(activeContainers),
+		Size:        units.HumanSizeWithPrecision(float64(total), 3),
+		Reclaimable: reclaimableStr,
+	}, nil
+}
+
+// ctrIsActive reports whether the container is currently running or paused.
+func ctrIsActive(ctr *libpod.Container) (bool, error) {
+	state, err := ctr.State()
+	if err != nil {
+		return false, err
+	}
+	switch state {
+	case libpod.ContainerStatePaused, libpod.ContainerStateRunning:
+		return true, nil
+	}
+	return false, nil
+}
+
+// activeContainers filters the given containers down to the running or
+// paused ones, keyed by container ID.
+func activeContainers(containers []*libpod.Container) (map[string]*libpod.Container, error) {
+	active := make(map[string]*libpod.Container)
+	for _, ctr := range containers {
+		isActive, err := ctrIsActive(ctr)
+		if err != nil {
+			return nil, err
+		}
+		if !isActive {
+			continue
+		}
+		active[ctr.ID()] = ctr
+	}
+	return active, nil
+}
+
+// getVolumeDiskUsage builds the "Local Volumes" summary row. Space of
+// volumes still referenced by a container counts as non-reclaimable.
+func getVolumeDiskUsage(volumes []*libpod.Volume, volumeUsedByContainerMap map[string][]*libpod.Container) (systemDfDiskUsage, error) {
+	var (
+		sumSize           int64
+		unreclaimableSize int64
+		reclaimableStr    string
+	)
+	for _, volume := range volumes {
+		size, err := volumeSize(volume)
+		if err != nil {
+			// Fixed typo: "volime" -> "volume".
+			return systemDfDiskUsage{}, errors.Wrapf(err, "error getting size of volume %s", volume.Name())
+		}
+		sumSize += size
+		if _, exist := volumeUsedByContainerMap[volume.Name()]; exist {
+			unreclaimableSize += size
+		}
+	}
+	reclaimable := sumSize - unreclaimableSize
+	// Guard the percentage computation against a zero total.
+	if sumSize != 0 {
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(float64(reclaimable), 3), 100*reclaimable/sumSize)
+	} else {
+		reclaimableStr = fmt.Sprintf("%s (%v%%)", units.HumanSizeWithPrecision(float64(reclaimable), 3), 0)
+	}
+	volumesDiskUsage := systemDfDiskUsage{
+		Type:        "Local Volumes",
+		Total:       len(volumes),
+		Active:      len(volumeUsedByContainerMap),
+		Size:        units.HumanSizeWithPrecision(float64(sumSize), 3),
+		Reclaimable: reclaimableStr,
+	}
+	return volumesDiskUsage, nil
+}
+
+// volumeUsedByContainer maps each user-volume name to the containers that
+// mount it.
+func volumeUsedByContainer(containers []*libpod.Container) map[string][]*libpod.Container {
+	usedBy := make(map[string][]*libpod.Container)
+	for _, ctr := range containers {
+		for _, volName := range ctr.UserVolumes() {
+			usedBy[volName] = append(usedBy[volName], ctr)
+		}
+	}
+	return usedBy
+}
+
+// volumeSize walks the volume's mount point and sums the sizes of all
+// non-directory entries. The walk aborts on the first error encountered.
+func volumeSize(volume *libpod.Volume) (int64, error) {
+	var total int64
+	walkErr := filepath.Walk(volume.MountPoint(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			total += info.Size()
+		}
+		return nil
+	})
+	return total, walkErr
+}
+
+// getImageVerboseDiskUsage builds one verbose disk-usage row per leaf image
+// (intermediate parents are skipped), resolving each image's repo:tag and
+// shared/unique size split.
+func getImageVerboseDiskUsage(ctx context.Context, images []*image.Image, imagesUsedbyCtr map[string][]*libpod.Container) ([]imageVerboseDiskUsage, error) {
+	var imagesVerboseDiskUsage []imageVerboseDiskUsage
+	imgUniqueSizeMap, err := imageUniqueSize(ctx, images)
+	if err != nil {
+		return imagesVerboseDiskUsage, errors.Wrapf(err, "error getting unique size of images")
+	}
+	for _, img := range images {
+		isParent, err := img.IsParent()
+		if err != nil {
+			return imagesVerboseDiskUsage, errors.Wrapf(err, "error checking if %s is a parent images", img.ID())
+		}
+		parent, err := img.GetParent()
+		if err != nil {
+			return imagesVerboseDiskUsage, errors.Wrapf(err, "error getting parent of image %s", img.ID())
+		}
+		// Skip intermediate images.
+		if isParent && parent != nil {
+			continue
+		}
+		size, err := img.Size(ctx)
+		if err != nil {
+			return imagesVerboseDiskUsage, errors.Wrapf(err, "error getting size of image %s", img.ID())
+		}
+		numberOfContainers := 0
+		if ctrs, exist := imagesUsedbyCtr[img.ID()]; exist {
+			numberOfContainers = len(ctrs)
+		}
+		repo := "<none>"
+		tag := "<none>"
+		// BUGFIX: the original indexed img.Names()[0] unconditionally,
+		// panicking with index-out-of-range on untagged images. Only
+		// resolve repo/tag when the image actually has a name.
+		if len(img.Names()) > 0 {
+			repopairs, err := image.ReposToMap([]string{img.Names()[0]})
+			if err != nil {
+				logrus.Errorf("error finding tag/digest for %s", img.ID())
+			}
+			for reponame, tags := range repopairs {
+				for _, tagname := range tags {
+					repo = reponame
+					tag = tagname
+				}
+			}
+		}
+
+		imageVerbosedf := imageVerboseDiskUsage{
+			Repository: repo,
+			Tag:        tag,
+			ImageID:    shortID(img.ID()),
+			Created:    units.HumanDuration(time.Since((img.Created().Local()))) + " ago",
+			Size:       units.HumanSizeWithPrecision(float64(*size), 3),
+			SharedSize: units.HumanSizeWithPrecision(float64(*size-imgUniqueSizeMap[img.ID()]), 3),
+			UniqueSize: units.HumanSizeWithPrecision(float64(imgUniqueSizeMap[img.ID()]), 3),
+			Containers: numberOfContainers,
+		}
+		imagesVerboseDiskUsage = append(imagesVerboseDiskUsage, imageVerbosedf)
+	}
+	return imagesVerboseDiskUsage, nil
+}
+
+// getContainerVerboseDiskUsage builds one verbose disk-usage row per
+// container: truncated IDs, command, volume count, RW-layer size, age, and
+// state.
+func getContainerVerboseDiskUsage(containers []*libpod.Container) (containersVerboseDiskUsage []containerVerboseDiskUsage, err error) {
+	for _, ctr := range containers {
+		imgID, _ := ctr.Image()
+		size, err := ctr.RWSize()
+		if err != nil {
+			return containersVerboseDiskUsage, errors.Wrapf(err, "error getting size of container %s", ctr.ID())
+		}
+		state, err := ctr.State()
+		if err != nil {
+			return containersVerboseDiskUsage, errors.Wrapf(err, "error getting the state of container %s", ctr.ID())
+		}
+
+		ctrVerboseData := containerVerboseDiskUsage{
+			ContainerID:  shortID(ctr.ID()),
+			Image:        shortImageID(imgID),
+			Command:      strings.Join(ctr.Command(), " "),
+			LocalVolumes: len(ctr.UserVolumes()),
+			Size:         units.HumanSizeWithPrecision(float64(size), 3),
+			// BUGFIX: a space was missing before "ago", producing output
+			// like "2 hoursago".
+			Created: units.HumanDuration(time.Since(ctr.CreatedTime().Local())) + " ago",
+			Status:  state.String(),
+			Names:   ctr.Name(),
+		}
+		containersVerboseDiskUsage = append(containersVerboseDiskUsage, ctrVerboseData)
+
+	}
+	return containersVerboseDiskUsage, nil
+}
+
+// getVolumeVerboseDiskUsage builds one verbose disk-usage row per volume,
+// with the number of containers linking to it.
+func getVolumeVerboseDiskUsage(volumes []*libpod.Volume, volumeUsedByContainerMap map[string][]*libpod.Container) (volumesVerboseDiskUsage []volumeVerboseDiskUsage, err error) {
+	for _, vol := range volumes {
+		volSize, err := volumeSize(vol)
+		if err != nil {
+			return volumesVerboseDiskUsage, errors.Wrapf(err, "error getting size of volume %s", vol.Name())
+		}
+		// A missing map entry yields a nil slice, so len() gives 0 links.
+		row := volumeVerboseDiskUsage{
+			VolumeName: vol.Name(),
+			Links:      len(volumeUsedByContainerMap[vol.Name()]),
+			Size:       units.HumanSizeWithPrecision(float64(volSize), 3),
+		}
+		volumesVerboseDiskUsage = append(volumesVerboseDiskUsage, row)
+	}
+	return volumesVerboseDiskUsage, nil
+}
+
+// imagesVerboseOutput prints the verbose images table. The section caption
+// goes to stderr so stdout stays machine-parseable.
+func imagesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
+	var imageVerboseHeader = map[string]string{
+		"Repository": "REPOSITORY",
+		"Tag":        "TAG",
+		"ImageID":    "IMAGE ID",
+		"Created":    "CREATED",
+		"Size":       "SIZE",
+		"SharedSize": "SHARED SIZE",
+		// Fixed typo: "UNQUE SIZE" -> "UNIQUE SIZE".
+		"UniqueSize": "UNIQUE SIZE",
+		"Containers": "CONTAINERS",
+	}
+	imagesVerboseDiskUsage, err := getImageVerboseDiskUsage(ctx, metaData.images, metaData.imagesUsedbyCtrMap)
+	if err != nil {
+		return errors.Wrapf(err, "error getting verbose output of images")
+	}
+	os.Stderr.WriteString("Images space usage:\n\n")
+	out := formats.StdoutTemplateArray{Output: systemDfImageVerboseDiskUsageToGeneric(imagesVerboseDiskUsage), Template: "table {{.Repository}}\t{{.Tag}}\t{{.ImageID}}\t{{.Created}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}", Fields: imageVerboseHeader}
+	formats.Writer(out).Out()
+	return nil
+}
+
+// containersVerboseOutput prints the verbose containers table. The section
+// caption goes to stderr so stdout stays machine-parseable.
+func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
+	var containerVerboseHeader = map[string]string{
+		// Fixed stray trailing space in the header value.
+		"ContainerID":  "CONTAINER ID",
+		"Image":        "IMAGE",
+		"Command":      "COMMAND",
+		"LocalVolumes": "LOCAL VOLUMES",
+		"Size":         "SIZE",
+		"Created":      "CREATED",
+		"Status":       "STATUS",
+		"Names":        "NAMES",
+	}
+	containersVerboseDiskUsage, err := getContainerVerboseDiskUsage(metaData.containers)
+	if err != nil {
+		return errors.Wrapf(err, "error getting verbose output of containers")
+	}
+	os.Stderr.WriteString("\nContainers space usage:\n\n")
+	out := formats.StdoutTemplateArray{Output: systemDfContainerVerboseDiskUsageToGeneric(containersVerboseDiskUsage), Template: "table {{.ContainerID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.Created}}\t{{.Status}}\t{{.Names}}", Fields: containerVerboseHeader}
+	formats.Writer(out).Out()
+	return nil
+}
+
+// volumesVerboseOutput prints the verbose volumes table. The section caption
+// goes to stderr so stdout stays machine-parseable.
+func volumesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
+	var volumeVerboseHeader = map[string]string{
+		"VolumeName": "VOLUME NAME",
+		"Links":      "LINKS",
+		"Size":       "SIZE",
+	}
+	volumesVerboseDiskUsage, err := getVolumeVerboseDiskUsage(metaData.volumes, metaData.volumeUsedByContainerMap)
+	if err != nil {
+		// Fixed typo: "ouput" -> "output".
+		return errors.Wrapf(err, "error getting verbose output of volumes")
+	}
+	os.Stderr.WriteString("\nLocal Volumes space usage:\n\n")
+	out := formats.StdoutTemplateArray{Output: systemDfVolumeVerboseDiskUsageToGeneric(volumesVerboseDiskUsage), Template: "table {{.VolumeName}}\t{{.Links}}\t{{.Size}}", Fields: volumeVerboseHeader}
+	formats.Writer(out).Out()
+	return nil
+}
+
+// verboseOutput prints the detailed image, container, and volume tables in
+// order, stopping at the first error.
+func verboseOutput(ctx context.Context, metaData dfMetaData) error {
+	sections := []func(context.Context, dfMetaData) error{
+		imagesVerboseOutput,
+		containersVerboseOutput,
+		volumesVerboseOutput,
+	}
+	for _, printSection := range sections {
+		if err := printSection(ctx, metaData); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// systemDfDiskUsageToGeneric boxes summary rows into []interface{} for the
+// formats template writer.
+func systemDfDiskUsageToGeneric(diskUsages []systemDfDiskUsage) (out []interface{}) {
+	for i := range diskUsages {
+		out = append(out, diskUsages[i])
+	}
+	return out
+}
+
+// systemDfImageVerboseDiskUsageToGeneric boxes verbose image rows into
+// []interface{} for the formats template writer.
+func systemDfImageVerboseDiskUsageToGeneric(diskUsages []imageVerboseDiskUsage) (out []interface{}) {
+	for i := range diskUsages {
+		out = append(out, diskUsages[i])
+	}
+	return out
+}
+
+// systemDfContainerVerboseDiskUsageToGeneric boxes verbose container rows
+// into []interface{} for the formats template writer.
+func systemDfContainerVerboseDiskUsageToGeneric(diskUsages []containerVerboseDiskUsage) (out []interface{}) {
+	for i := range diskUsages {
+		out = append(out, diskUsages[i])
+	}
+	return out
+}
+
+// systemDfVolumeVerboseDiskUsageToGeneric boxes verbose volume rows into
+// []interface{} for the formats template writer.
+func systemDfVolumeVerboseDiskUsageToGeneric(diskUsages []volumeVerboseDiskUsage) (out []interface{}) {
+	for i := range diskUsages {
+		out = append(out, diskUsages[i])
+	}
+	return out
+}
+
+// shortImageID truncates an image ID to the 4-character prefix shown in the
+// verbose container table; shorter IDs are returned unchanged.
+func shortImageID(id string) string {
+	const imageIDTruncLength int = 4
+	if len(id) <= imageIDTruncLength {
+		return id
+	}
+	return id[:imageIDTruncLength]
+}
diff --git a/completions/bash/podman b/completions/bash/podman
index 1976bff44..dfa673481 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -999,6 +999,24 @@ _podman_container() {
esac
}
+# Bash completion for `podman system df`.
+# BUGFIX: --verbose is a boolean flag and was wrongly listed under
+# options_with_args; only --format takes an argument.
+_podman_system_df() {
+	local options_with_args="
+	--format
+	"
+	local boolean_options="
+	-h
+	--help
+	--verbose
+	-v
+	"
+	case "$cur" in
+	-*)
+		COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+		;;
+	esac
+}
+
_podman_system_info() {
_podman_info
}
@@ -1029,6 +1047,7 @@ _podman_system() {
-h
"
subcommands="
+ df
info
prune
"
diff --git a/docs/podman-system-df.1.md b/docs/podman-system-df.1.md
new file mode 100644
index 000000000..f33523dd6
--- /dev/null
+++ b/docs/podman-system-df.1.md
@@ -0,0 +1,57 @@
+% podman-system-df(1) podman
+
+## NAME
+podman\-system\-df - Show podman disk usage
+
+## SYNOPSIS
+**podman system df** [*options*]
+
+## DESCRIPTION
+Show podman disk usage
+
+## OPTIONS
+**--format**=""
+
+Pretty-print images using a Go template
+
+**-v**, **--verbose**[=false]
+
+Show detailed information on space usage
+
+## EXAMPLE
+
+$ podman system df
+TYPE TOTAL ACTIVE SIZE RECLAIMABLE
+Images 6 2 281MB 168MB (59%)
+Containers 3 1 0B 0B (0%)
+Local Volumes 1 1 22B 0B (0%)
+
+$ podman system df -v
+Images space usage:
+
+REPOSITORY                 TAG      IMAGE ID       CREATED       SIZE     SHARED SIZE   UNIQUE SIZE   CONTAINERS
+docker.io/library/alpine latest 5cb3aa00f899 2 weeks ago 5.79MB 0B 5.79MB 5
+
+Containers space usage:
+
+CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES
+073f7e62812d   5cb3   sleep 100     1               0B       About an hour ago   exited       zen_joliot
+3f19f5bba242   5cb3   sleep 100     0               5.52kB   4 hours ago         exited       pedantic_archimedes
+8cd89bf645cc   5cb3   ls foodir     0               58B      2 hours ago         configured   agitated_hamilton
+a1d948a4b61d   5cb3   ls foodir     0               12B      2 hours ago         exited       laughing_wing
+eafe3e3c5bb3   5cb3   sleep 10000   0               72B      2 hours ago         running      priceless_liskov
+
+Local Volumes space usage:
+
+VOLUME NAME LINKS SIZE
+data 1 0B
+
+$ podman system df --format "{{.Type}}\t{{.Total}}"
+Images 1
+Containers 5
+Local Volumes 1
+
+## SEE ALSO
+podman-system(1)
+
+## HISTORY
+March 2019, Originally compiled by Qi Wang (qiwan at redhat dot com)
diff --git a/docs/podman-system.1.md b/docs/podman-system.1.md
index 6d87648e8..32b3efdd9 100644
--- a/docs/podman-system.1.md
+++ b/docs/podman-system.1.md
@@ -13,7 +13,8 @@ The system command allows you to manage the podman systems
| Command | Man Page | Description |
| ------- | --------------------------------------------------- | ---------------------------------------------------------------------------- |
-| info | [podman-info(1)](podman-info.1.md) | Displays Podman related system information. |
+| df | [podman-system-df(1)](podman-system-df.1.md) | Show podman disk usage. |
+| info | [podman-info(1)](podman-info.1.md) | Displays Podman related system information. |
| prune | [podman-system-prune(1)](podman-system-prune.1.md) | Remove all unused data |
| renumber | [podman-system-renumber(1)](podman-system-renumber.1.md)| Migrate lock numbers to handle a change in maximum number of locks. |
diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go
new file mode 100644
index 000000000..92787f17c
--- /dev/null
+++ b/test/e2e/system_df_test.go
@@ -0,0 +1,62 @@
+// +build !remoteclient
+
+package integration
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+// End-to-end check for `podman system df`: creates one untagged-image
+// container, a named volume, and a second container mounting that volume,
+// then verifies the summary counts.
+var _ = Describe("podman system df", func() {
+	var (
+		tempdir    string
+		err        error
+		podmanTest *PodmanTestIntegration
+	)
+
+	BeforeEach(func() {
+		tempdir, err = CreateTempDirInTempDir()
+		if err != nil {
+			os.Exit(1)
+		}
+		podmanTest = PodmanTestCreate(tempdir)
+		podmanTest.RestoreAllArtifacts()
+	})
+
+	AfterEach(func() {
+		podmanTest.Cleanup()
+		f := CurrentGinkgoTestDescription()
+		timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+		GinkgoWriter.Write([]byte(timedResult))
+	})
+
+	It("podman system df", func() {
+		session := podmanTest.Podman([]string{"create", ALPINE})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+
+		session = podmanTest.Podman([]string{"volume", "create", "data"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+
+		session = podmanTest.Podman([]string{"create", "-v", "data:/data", "--name", "container1", "busybox"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+
+		session = podmanTest.Podman([]string{"system", "df"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+		// Header + one row each for Images, Containers, Local Volumes.
+		Expect(len(session.OutputToStringArray())).To(Equal(4))
+		images := strings.Fields(session.OutputToStringArray()[1])
+		containers := strings.Fields(session.OutputToStringArray()[2])
+		volumes := strings.Fields(session.OutputToStringArray()[3])
+		// TOTAL columns: 2 images, 2 containers; "Local Volumes" is two
+		// words, so its TOTAL sits at index 2.
+		Expect(images[1]).To(Equal("2"))
+		Expect(containers[1]).To(Equal("2"))
+		Expect(volumes[2]).To(Equal("1"))
+	})
+})
diff --git a/transfer.md b/transfer.md
index 998a0a9e7..df91cdf21 100644
--- a/transfer.md
+++ b/transfer.md
@@ -78,6 +78,7 @@ There are other equivalents for these tools
| `docker volume prune` | [`podman volume prune`](./docs/podman-volume-prune.1.md) |
| `docker volume rm` | [`podman volume rm`](./docs/podman-volume-rm.1.md) |
| `docker system` | [`podman system`](./docs/podman-system.1.md) |
+| `docker system df` | [`podman system df`](./docs/podman-system-df.1.md) |
| `docker system prune` | [`podman system prune`](./docs/podman-system-prune.1.md) |
| `docker system info` | [`podman system info`](./docs/podman-system-info.1.md) |
| `docker wait` | [`podman wait`](./docs/podman-wait.1.md) |