Diffstat (limited to 'cmd')
56 files changed, 2283 insertions, 363 deletions
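One of the user-visible additions in the diff body below (cmd/podman/exists.go) is an exit-code contract for "podman container exists" and its image/pod variants: the command exits 0 when the object is found and 1 when it is not. The following is a minimal Go sketch of scripting against that contract; it assumes podman is on PATH, and "mycontainer" is a hypothetical name used only for illustration.

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// "mycontainer" is a placeholder; substitute a real container name or ID.
	cmd := exec.Command("podman", "container", "exists", "mycontainer")
	err := cmd.Run()
	if err == nil {
		// Exit status 0: the container was found in local storage.
		fmt.Println("container exists")
		return
	}
	if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
		// Per exists.go below, exit status 1 means the container was not found.
		fmt.Println("container does not exist")
		return
	}
	// Any other failure (runtime error, bad usage, etc.) is reported as-is.
	fmt.Println("podman failed:", err)
}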
diff --git a/cmd/podman/build.go b/cmd/podman/build.go index 14bf226f9..880cb892f 100644 --- a/cmd/podman/build.go +++ b/cmd/podman/build.go @@ -1,6 +1,11 @@ package main import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "github.com/containers/buildah" "github.com/containers/buildah/imagebuildah" buildahcli "github.com/containers/buildah/pkg/cli" @@ -10,15 +15,15 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" - "io/ioutil" - "os" - "path/filepath" - "strings" ) var ( layerFlags = []cli.Flag{ cli.BoolTFlag{ + Name: "force-rm", + Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful. (default true)", + }, + cli.BoolTFlag{ Name: "layers", Usage: "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. ", }, @@ -230,7 +235,7 @@ func buildCmd(c *cli.Context) error { Layers: layers, NoCache: c.Bool("no-cache"), RemoveIntermediateCtrs: c.BoolT("rm"), - ForceRmIntermediateCtrs: c.Bool("force-rm"), + ForceRmIntermediateCtrs: c.BoolT("force-rm"), } if c.Bool("quiet") { diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go index bf280920d..824c97662 100644 --- a/cmd/podman/checkpoint.go +++ b/cmd/podman/checkpoint.go @@ -24,6 +24,14 @@ var ( Usage: "keep all temporary checkpoint files", }, cli.BoolFlag{ + Name: "leave-running, R", + Usage: "leave the container running after writing checkpoint to disk", + }, + cli.BoolFlag{ + Name: "tcp-established", + Usage: "checkpoint a container with established TCP connections", + }, + cli.BoolFlag{ Name: "all, a", Usage: "checkpoint all running containers", }, @@ -50,7 +58,11 @@ func checkpointCmd(c *cli.Context) error { } defer runtime.Shutdown(false) - keep := c.Bool("keep") + options := libpod.ContainerCheckpointOptions{ + Keep: c.Bool("keep"), + KeepRunning: c.Bool("leave-running"), + TCPEstablished: c.Bool("tcp-established"), + } if err := checkAllAndLatest(c); err != nil { return err @@ -59,7 +71,7 @@ func checkpointCmd(c *cli.Context) error { containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running") for _, ctr := range containers { - if err = ctr.Checkpoint(context.TODO(), keep); err != nil { + if err = ctr.Checkpoint(context.TODO(), options); err != nil { if lastError != nil { fmt.Fprintln(os.Stderr, lastError) } diff --git a/cmd/podman/commit.go b/cmd/podman/commit.go index b09c6b0d9..02ede4f73 100644 --- a/cmd/podman/commit.go +++ b/cmd/podman/commit.go @@ -95,7 +95,7 @@ func commitCmd(c *cli.Context) error { for _, change := range c.StringSlice("change") { splitChange := strings.Split(strings.ToUpper(change), "=") if !util.StringInSlice(splitChange[0], libpod.ChangeCmds) { - return errors.Errorf("invalid syntax for --change ", change) + return errors.Errorf("invalid syntax for --change: %s", change) } } } diff --git a/cmd/podman/common.go b/cmd/podman/common.go index f9e746b28..8404a29b8 100644 --- a/cmd/podman/common.go +++ b/cmd/podman/common.go @@ -11,6 +11,7 @@ import ( "github.com/containers/buildah" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage" "github.com/fatih/camelcase" "github.com/pkg/errors" @@ -161,6 +162,13 @@ func getContext() context.Context { return context.TODO() } +func getDefaultNetwork() string { + if rootless.IsRootless() { + return "slirp4netns" + } + return "bridge" +} + // Common flags shared between commands var createFlags = []cli.Flag{ cli.StringSliceFlag{ @@ -372,7 
+380,7 @@ var createFlags = []cli.Flag{ cli.StringFlag{ Name: "net, network", Usage: "Connect a container to a network", - Value: "bridge", + Value: getDefaultNetwork(), }, cli.BoolFlag{ Name: "oom-kill-disable", @@ -414,6 +422,10 @@ var createFlags = []cli.Flag{ Name: "read-only", Usage: "Make containers root filesystem read-only", }, + cli.StringFlag{ + Name: "restart", + Usage: "Restart is not supported. Please use a systemd unit file for restart", + }, cli.BoolFlag{ Name: "rm", Usage: "Remove container (and pod if created) after exit", diff --git a/cmd/podman/container.go b/cmd/podman/container.go index ff634278f..b0232c874 100644 --- a/cmd/podman/container.go +++ b/cmd/podman/container.go @@ -9,6 +9,7 @@ var ( attachCommand, checkpointCommand, cleanupCommand, + containerExistsCommand, commitCommand, createCommand, diffCommand, @@ -21,7 +22,7 @@ var ( mountCommand, pauseCommand, portCommand, - // pruneCommand, + pruneContainersCommand, refreshCommand, restartCommand, restoreCommand, diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go new file mode 100644 index 000000000..92604e82f --- /dev/null +++ b/cmd/podman/containers_prune.go @@ -0,0 +1,74 @@ +package main + +import ( + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/cmd/podman/shared" + "github.com/containers/libpod/libpod" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +var ( + pruneContainersDescription = ` + podman container prune + + Removes all exited containers +` + + pruneContainersCommand = cli.Command{ + Name: "prune", + Usage: "Remove all stopped containers", + Description: pruneContainersDescription, + Action: pruneContainersCmd, + OnUsageError: usageErrorHandler, + } +) + +func pruneContainersCmd(c *cli.Context) error { + var ( + deleteFuncs []shared.ParallelWorkerInput + ) + + ctx := getContext() + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + + filter := func(c *libpod.Container) bool { + state, _ := c.State() + if state == libpod.ContainerStateStopped || (state == libpod.ContainerStateExited && err == nil && c.PodID() == "") { + return true + } + return false + } + delContainers, err := runtime.GetContainers(filter) + if err != nil { + return err + } + if len(delContainers) < 1 { + return nil + } + for _, container := range delContainers { + con := container + f := func() error { + return runtime.RemoveContainer(ctx, con, c.Bool("force")) + } + + deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{ + ContainerID: con.ID(), + ParallelFunc: f, + }) + } + maxWorkers := shared.Parallelize("rm") + if c.GlobalIsSet("max-workers") { + maxWorkers = c.GlobalInt("max-workers") + } + logrus.Debugf("Setting maximum workers to %d", maxWorkers) + + // Run the parallel funcs + deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs) + return printParallelOutput(deleteErrors, errCount) +} diff --git a/cmd/podman/create.go b/cmd/podman/create.go index 9f6825c95..870eb28d6 100644 --- a/cmd/podman/create.go +++ b/cmd/podman/create.go @@ -11,6 +11,7 @@ import ( "syscall" "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/image" ann "github.com/containers/libpod/pkg/annotations" @@ -66,7 +67,7 @@ func createCmd(c *cli.Context) error { 
rootless.SetSkipStorageSetup(true) } - runtime, err := libpodruntime.GetContainerRuntime(c) + runtime, err := libpodruntime.GetRuntime(c) if err != nil { return errors.Wrapf(err, "error creating libpod runtime") } @@ -128,7 +129,7 @@ func createContainer(c *cli.Context, runtime *libpod.Runtime) (*libpod.Container var data *inspect.ImageData = nil if rootfs == "" && !rootless.SkipStorageSetup() { - newImage, err := runtime.ImageRuntime().New(ctx, c.Args()[0], rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false, false) + newImage, err := runtime.ImageRuntime().New(ctx, c.Args()[0], rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false) if err != nil { return nil, nil, err } @@ -375,8 +376,8 @@ func configureEntrypoint(c *cli.Context, data *inspect.ImageData) []string { return entrypoint } -func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string]string) (map[string]string, error) { - pod, err := runtime.LookupPod(c.String("pod")) +func configurePod(c *cli.Context, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, error) { + pod, err := runtime.LookupPod(podName) if err != nil { return namespaces, err } @@ -409,7 +410,12 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim inputCommand, command []string memoryLimit, memoryReservation, memorySwap, memoryKernel int64 blkioWeight uint16 + namespaces map[string]string ) + if c.IsSet("restart") { + return nil, errors.Errorf("--restart option is not supported.\nUse systemd unit files for restarting containers") + } + idmappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidname"), c.String("subgidname")) if err != nil { return nil, err @@ -492,12 +498,21 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim return nil, errors.Errorf("--cpu-quota and --cpus cannot be set together") } + // EXPOSED PORTS + var portBindings map[nat.Port][]nat.PortBinding + if data != nil { + portBindings, err = cc.ExposedPorts(c.StringSlice("expose"), c.StringSlice("publish"), c.Bool("publish-all"), data.ContainerConfig.ExposedPorts) + if err != nil { + return nil, err + } + } + // Kernel Namespaces // TODO Fix handling of namespace from pod // Instead of integrating here, should be done in libpod // However, that also involves setting up security opts // when the pod's namespace is integrated - namespaces := map[string]string{ + namespaces = map[string]string{ "pid": c.String("pid"), "net": c.String("net"), "ipc": c.String("ipc"), @@ -505,8 +520,41 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim "uts": c.String("uts"), } + originalPodName := c.String("pod") + podName := strings.Replace(originalPodName, "new:", "", 1) + // after we strip out :new, make sure there is something left for a pod name + if len(podName) < 1 && c.IsSet("pod") { + return nil, errors.Errorf("new pod name must be at least one character") + } if c.IsSet("pod") { - namespaces, err = configurePod(c, runtime, namespaces) + if strings.HasPrefix(originalPodName, "new:") { + // pod does not exist; lets make it + var podOptions []libpod.PodCreateOption + podOptions = append(podOptions, libpod.WithPodName(podName), libpod.WithInfraContainer(), libpod.WithPodCgroups()) + if len(portBindings) > 0 { + ociPortBindings, err := cc.NatToOCIPortBindings(portBindings) + if err != nil { + return nil, err + } + podOptions = append(podOptions, 
libpod.WithInfraContainerPorts(ociPortBindings)) + } + + podNsOptions, err := shared.GetNamespaceOptions(strings.Split(DefaultKernelNamespaces, ",")) + if err != nil { + return nil, err + } + podOptions = append(podOptions, podNsOptions...) + // make pod + pod, err := runtime.NewPod(ctx, podOptions...) + if err != nil { + return nil, err + } + logrus.Debugf("pod %s created by new container request", pod.ID()) + + // The container now cannot have port bindings; so we reset the map + portBindings = make(map[nat.Port][]nat.PortBinding) + } + namespaces, err = configurePod(c, runtime, namespaces, podName) if err != nil { return nil, err } @@ -535,7 +583,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim // Make sure if network is set to container namespace, port binding is not also being asked for netMode := ns.NetworkMode(namespaces["net"]) if netMode.IsContainer() { - if len(c.StringSlice("publish")) > 0 || c.Bool("publish-all") { + if len(portBindings) > 0 { return nil, errors.Errorf("cannot set port bindings on an existing container network namespace") } } @@ -644,15 +692,6 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim return nil, errors.Errorf("No command specified on command line or as CMD or ENTRYPOINT in this image") } - // EXPOSED PORTS - var portBindings map[nat.Port][]nat.PortBinding - if data != nil { - portBindings, err = cc.ExposedPorts(c.StringSlice("expose"), c.StringSlice("publish"), c.Bool("publish-all"), data.ContainerConfig.ExposedPorts) - if err != nil { - return nil, err - } - } - // SHM Size shmSize, err := units.FromHumanSize(c.String("shm-size")) if err != nil { @@ -670,6 +709,11 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim if util.StringInSlice(".", c.StringSlice("dns-search")) && len(c.StringSlice("dns-search")) > 1 { return nil, errors.Errorf("cannot pass additional search domains when also specifying '.'") } + if !netMode.IsPrivate() { + if c.IsSet("dns-search") || c.IsSet("dns") || c.IsSet("dns-opt") { + return nil, errors.Errorf("specifying DNS flags when network mode is shared with the host or another container is not allowed") + } + } // Validate domains are good for _, dom := range c.StringSlice("dns-search") { @@ -741,7 +785,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim NetMode: netMode, UtsMode: utsMode, PidMode: pidMode, - Pod: c.String("pod"), + Pod: podName, Privileged: c.Bool("privileged"), Publish: c.StringSlice("publish"), PublishAll: c.Bool("publish-all"), diff --git a/cmd/podman/create_cli.go b/cmd/podman/create_cli.go index 218e9b806..b3a30d185 100644 --- a/cmd/podman/create_cli.go +++ b/cmd/podman/create_cli.go @@ -201,12 +201,13 @@ func parseVolumesFrom(volumesFrom []string) error { } func validateVolumeHostDir(hostDir string) error { - if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) - } - if _, err := os.Stat(hostDir); err != nil { - return errors.Wrapf(err, "error checking path %q", hostDir) + if filepath.IsAbs(hostDir) { + if _, err := os.Stat(hostDir); err != nil { + return errors.Wrapf(err, "error checking path %q", hostDir) + } } + // If hostDir is not an absolute path, that means the user wants to create a + // named volume. This will be done later on in the code. 
return nil } diff --git a/cmd/podman/exists.go b/cmd/podman/exists.go new file mode 100644 index 000000000..2e2559ec7 --- /dev/null +++ b/cmd/podman/exists.go @@ -0,0 +1,120 @@ +package main + +import ( + "os" + + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/image" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var ( + imageExistsDescription = ` + podman image exists + + Check if an image exists in local storage +` + + imageExistsCommand = cli.Command{ + Name: "exists", + Usage: "Check if an image exists in local storage", + Description: imageExistsDescription, + Action: imageExistsCmd, + ArgsUsage: "IMAGE-NAME", + OnUsageError: usageErrorHandler, + } +) + +var ( + containerExistsDescription = ` + podman container exists + + Check if a container exists in local storage +` + + containerExistsCommand = cli.Command{ + Name: "exists", + Usage: "Check if a container exists in local storage", + Description: containerExistsDescription, + Action: containerExistsCmd, + ArgsUsage: "CONTAINER-NAME", + OnUsageError: usageErrorHandler, + } +) + +var ( + podExistsDescription = ` + podman pod exists + + Check if a pod exists in local storage +` + + podExistsCommand = cli.Command{ + Name: "exists", + Usage: "Check if a pod exists in local storage", + Description: podExistsDescription, + Action: podExistsCmd, + ArgsUsage: "POD-NAME", + OnUsageError: usageErrorHandler, + } +) + +func imageExistsCmd(c *cli.Context) error { + args := c.Args() + if len(args) > 1 || len(args) < 1 { + return errors.New("you may only check for the existence of one image at a time") + } + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + if _, err := runtime.ImageRuntime().NewFromLocal(args[0]); err != nil { + if errors.Cause(err) == image.ErrNoSuchImage { + os.Exit(1) + } + return err + } + return nil +} + +func containerExistsCmd(c *cli.Context) error { + args := c.Args() + if len(args) > 1 || len(args) < 1 { + return errors.New("you may only check for the existence of one container at a time") + } + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + if _, err := runtime.LookupContainer(args[0]); err != nil { + if errors.Cause(err) == libpod.ErrNoSuchCtr { + os.Exit(1) + } + return err + } + return nil +} + +func podExistsCmd(c *cli.Context) error { + args := c.Args() + if len(args) > 1 || len(args) < 1 { + return errors.New("you may only check for the existence of one pod at a time") + } + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + + if _, err := runtime.LookupPod(args[0]); err != nil { + if errors.Cause(err) == libpod.ErrNoSuchPod { + os.Exit(1) + } + return err + } + return nil +} diff --git a/cmd/podman/generate.go b/cmd/podman/generate.go new file mode 100644 index 000000000..765d0ee70 --- /dev/null +++ b/cmd/podman/generate.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/urfave/cli" +) + +var ( + generateSubCommands = []cli.Command{ + containerKubeCommand, + } + + generateDescription = "generate structured data based for a containers and pods" + kubeCommand = cli.Command{ + Name: "generate", + Usage: "generated structured data", + Description: generateDescription, + ArgsUsage: "", + Subcommands: 
generateSubCommands, + UseShortOptionHandling: true, + OnUsageError: usageErrorHandler, + Hidden: true, + } +) diff --git a/cmd/podman/generate_kube.go b/cmd/podman/generate_kube.go new file mode 100644 index 000000000..8f2f0de32 --- /dev/null +++ b/cmd/podman/generate_kube.go @@ -0,0 +1,111 @@ +package main + +import ( + "fmt" + + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/rootless" + podmanVersion "github.com/containers/libpod/version" + "github.com/ghodss/yaml" + "github.com/pkg/errors" + "github.com/urfave/cli" + "k8s.io/api/core/v1" +) + +var ( + containerKubeFlags = []cli.Flag{ + cli.BoolFlag{ + Name: "service, s", + Usage: "generate YAML for kubernetes service object", + }, + } + containerKubeDescription = "Generate Kubernetes Pod YAML" + containerKubeCommand = cli.Command{ + Name: "kube", + Usage: "Generate Kubernetes pod YAML for a container or pod", + Description: containerKubeDescription, + Flags: sortFlags(containerKubeFlags), + Action: generateKubeYAMLCmd, + ArgsUsage: "CONTAINER|POD-NAME", + UseShortOptionHandling: true, + OnUsageError: usageErrorHandler, + } +) + +// generateKubeYAMLCmdgenerates or replays kube +func generateKubeYAMLCmd(c *cli.Context) error { + var ( + podYAML *v1.Pod + container *libpod.Container + err error + output []byte + pod *libpod.Pod + marshalledPod []byte + marshalledService []byte + servicePorts []v1.ServicePort + ) + + if rootless.IsRootless() { + return errors.Wrapf(libpod.ErrNotImplemented, "rootless users") + } + args := c.Args() + if len(args) > 1 || (len(args) < 1 && !c.Bool("latest")) { + return errors.Errorf("you must provide one container|pod ID or name or --latest") + } + + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + + // Get the container in question + container, err = runtime.LookupContainer(args[0]) + if err != nil { + pod, err = runtime.LookupPod(args[0]) + if err != nil { + return err + } + podYAML, servicePorts, err = pod.GenerateForKube() + } else { + if len(container.Dependencies()) > 0 { + return errors.Wrapf(libpod.ErrNotImplemented, "containers with dependencies") + } + podYAML, err = container.GenerateForKube() + } + if err != nil { + return err + } + + if c.Bool("service") { + serviceYAML := libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts) + marshalledService, err = yaml.Marshal(serviceYAML) + if err != nil { + return err + } + } + // Marshall the results + marshalledPod, err = yaml.Marshal(podYAML) + if err != nil { + return err + } + + header := `# Generation of Kubernetes YAML is still under development! +# +# Save the output of this file and use kubectl create -f to import +# it into Kubernetes. +# +# Created with podman-%s +` + output = append(output, []byte(fmt.Sprintf(header, podmanVersion.Version))...) + output = append(output, marshalledPod...) + if c.Bool("service") { + output = append(output, []byte("---\n")...) + output = append(output, marshalledService...) 
+ } + // Output the v1.Pod with the v1.Container + fmt.Println(string(output)) + + return nil +} diff --git a/cmd/podman/image.go b/cmd/podman/image.go index e67f61799..e978b9cf5 100644 --- a/cmd/podman/image.go +++ b/cmd/podman/image.go @@ -9,15 +9,17 @@ var ( buildCommand, historyCommand, importCommand, + imageExistsCommand, inspectCommand, loadCommand, lsImagesCommand, - // pruneCommand, + pruneImagesCommand, pullCommand, pushCommand, rmImageCommand, saveCommand, tagCommand, + trustCommand, } imageDescription = "Manage images" diff --git a/cmd/podman/images.go b/cmd/podman/images.go index a8955e49e..a1aeb6042 100644 --- a/cmd/podman/images.go +++ b/cmd/podman/images.go @@ -6,8 +6,7 @@ import ( "sort" "strings" "time" - - "github.com/sirupsen/logrus" + "unicode" "github.com/containers/libpod/cmd/podman/formats" "github.com/containers/libpod/cmd/podman/libpodruntime" @@ -16,6 +15,7 @@ import ( "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -280,7 +280,9 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image if !opts.noTrunc { imageID = shortID(img.ID()) } + // get all specified repo:tag pairs and print them separately + outer: for repo, tags := range image.ReposToMap(img.Names()) { for _, tag := range tags { size, err := img.Size(ctx) @@ -289,6 +291,8 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image sizeStr = err.Error() } else { sizeStr = units.HumanSizeWithPrecision(float64(*size), 3) + lastNumIdx := strings.LastIndexFunc(sizeStr, unicode.IsNumber) + sizeStr = sizeStr[:lastNumIdx+1] + " " + sizeStr[lastNumIdx+1:] } params := imagesTemplateParams{ Repository: repo, @@ -300,6 +304,10 @@ func getImagesTemplateOutput(ctx context.Context, runtime *libpod.Runtime, image Size: sizeStr, } imagesOutput = append(imagesOutput, params) + if opts.quiet { // Show only one image ID when quiet + break outer + } + } } } @@ -374,13 +382,13 @@ func CreateFilterFuncs(ctx context.Context, r *libpod.Runtime, c *cli.Context, i case "before": before, err := r.ImageRuntime().NewFromLocal(splitFilter[1]) if err != nil { - return nil, errors.Wrapf(err, "unable to find image % in local stores", splitFilter[1]) + return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1]) } filterFuncs = append(filterFuncs, image.CreatedBeforeFilter(before.Created())) case "after": after, err := r.ImageRuntime().NewFromLocal(splitFilter[1]) if err != nil { - return nil, errors.Wrapf(err, "unable to find image % in local stores", splitFilter[1]) + return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1]) } filterFuncs = append(filterFuncs, image.CreatedAfterFilter(after.Created())) case "dangling": diff --git a/cmd/podman/images_prune.go b/cmd/podman/images_prune.go new file mode 100644 index 000000000..cb72a498f --- /dev/null +++ b/cmd/podman/images_prune.go @@ -0,0 +1,34 @@ +package main + +import ( + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/cmd/podman/shared" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var ( + pruneImagesDescription = ` + podman image prune + + Removes all unnamed images from local storage +` + + pruneImagesCommand = cli.Command{ + Name: "prune", + Usage: "Remove unused images", + Description: pruneImagesDescription, + Action: pruneImagesCmd, + OnUsageError: usageErrorHandler, + } +) + +func pruneImagesCmd(c *cli.Context) 
error { + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not get runtime") + } + defer runtime.Shutdown(false) + + return shared.Prune(runtime.ImageRuntime()) +} diff --git a/cmd/podman/import.go b/cmd/podman/import.go index be516e4fa..144354fa6 100644 --- a/cmd/podman/import.go +++ b/cmd/podman/import.go @@ -139,7 +139,7 @@ func downloadFromURL(source string) (string, error) { _, err = io.Copy(outFile, response.Body) if err != nil { - return "", errors.Wrapf(err, "error saving %q to %q", source, outFile) + return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name()) } return outFile.Name(), nil diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go index bd9e8c13c..6ffcde55f 100644 --- a/cmd/podman/inspect.go +++ b/cmd/podman/inspect.go @@ -119,7 +119,7 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *l } libpodInspectData, err := ctr.Inspect(c.Bool("size")) if err != nil { - inspectError = errors.Wrapf(err, "error getting libpod container inspect data %q", ctr.ID) + inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID()) break } data, err = shared.GetCtrInspectInfo(ctr, libpodInspectData) @@ -154,12 +154,12 @@ func iterateInput(ctx context.Context, c *cli.Context, args []string, runtime *l } else { libpodInspectData, err := ctr.Inspect(c.Bool("size")) if err != nil { - inspectError = errors.Wrapf(err, "error getting libpod container inspect data %q", ctr.ID) + inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID()) break } data, err = shared.GetCtrInspectInfo(ctr, libpodInspectData) if err != nil { - inspectError = errors.Wrapf(err, "error parsing container data %q", ctr.ID) + inspectError = errors.Wrapf(err, "error parsing container data %s", ctr.ID()) break } } diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go index 27882aeee..cfe4b4218 100644 --- a/cmd/podman/kill.go +++ b/cmd/podman/kill.go @@ -43,7 +43,6 @@ var ( // killCmd kills one or more containers with a signal func killCmd(c *cli.Context) error { var ( - lastError error killFuncs []shared.ParallelWorkerInput killSignal uint = uint(syscall.SIGTERM) ) @@ -75,8 +74,12 @@ func killCmd(c *cli.Context) error { containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running") if err != nil { - return err + if len(containers) == 0 { + return err + } + fmt.Println(err.Error()) } + for _, ctr := range containers { con := ctr f := func() error { @@ -95,18 +98,6 @@ func killCmd(c *cli.Context) error { } logrus.Debugf("Setting maximum workers to %d", maxWorkers) - killErrors := shared.ParallelExecuteWorkerPool(maxWorkers, killFuncs) - - for cid, result := range killErrors { - if result != nil { - if len(killErrors) > 1 { - fmt.Println(result.Error()) - } - lastError = result - continue - } - fmt.Println(cid) - } - - return lastError + killErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, killFuncs) + return printParallelOutput(killErrors, errCount) } diff --git a/cmd/podman/libpodruntime/runtime.go b/cmd/podman/libpodruntime/runtime.go index a4b3581be..d7a0dd931 100644 --- a/cmd/podman/libpodruntime/runtime.go +++ b/cmd/podman/libpodruntime/runtime.go @@ -11,31 +11,22 @@ import ( // GetRuntime generates a new libpod runtime configured by command line options func GetRuntime(c *cli.Context) (*libpod.Runtime, error) { - storageOpts, err := util.GetDefaultStoreOptions() - if err != nil { - return nil, err - } - return 
GetRuntimeWithStorageOpts(c, &storageOpts) -} + storageOpts := new(storage.StoreOptions) + options := []libpod.RuntimeOption{} -// GetContainerRuntime generates a new libpod runtime configured by command line options for containers -func GetContainerRuntime(c *cli.Context) (*libpod.Runtime, error) { - mappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidmap"), c.String("subgidmap")) + _, volumePath, err := util.GetDefaultStoreOptions() if err != nil { return nil, err } - storageOpts, err := util.GetDefaultStoreOptions() - if err != nil { - return nil, err - } - storageOpts.UIDMap = mappings.UIDMap - storageOpts.GIDMap = mappings.GIDMap - return GetRuntimeWithStorageOpts(c, &storageOpts) -} -// GetRuntime generates a new libpod runtime configured by command line options -func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions) (*libpod.Runtime, error) { - options := []libpod.RuntimeOption{} + if c.IsSet("uidmap") || c.IsSet("gidmap") || c.IsSet("subuidmap") || c.IsSet("subgidmap") { + mappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidmap"), c.String("subgidmap")) + if err != nil { + return nil, err + } + storageOpts.UIDMap = mappings.UIDMap + storageOpts.GIDMap = mappings.GIDMap + } if c.GlobalIsSet("root") { storageOpts.GraphRoot = c.GlobalString("root") @@ -90,8 +81,8 @@ func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions if c.GlobalIsSet("default-mounts-file") { options = append(options, libpod.WithDefaultMountsFile(c.GlobalString("default-mounts-file"))) } - if c.GlobalIsSet("hooks-dir-path") { - options = append(options, libpod.WithHooksDir(c.GlobalString("hooks-dir-path"))) + if c.GlobalIsSet("hooks-dir") { + options = append(options, libpod.WithHooksDir(c.GlobalStringSlice("hooks-dir")...)) } // TODO flag to set CNI plugins dir? @@ -104,6 +95,7 @@ func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions if c.IsSet("infra-command") { options = append(options, libpod.WithDefaultInfraCommand(c.String("infra-command"))) } + options = append(options, libpod.WithVolumePath(volumePath)) if c.IsSet("config") { return libpod.NewRuntimeFromConfig(c.String("config"), options...) 
} diff --git a/cmd/podman/login.go b/cmd/podman/login.go index aa26d1466..4452651f8 100644 --- a/cmd/podman/login.go +++ b/cmd/podman/login.go @@ -2,13 +2,13 @@ package main import ( "bufio" - "context" "fmt" "os" "strings" "github.com/containers/image/docker" "github.com/containers/image/pkg/docker/config" + "github.com/containers/image/types" "github.com/containers/libpod/libpod/common" "github.com/pkg/errors" "github.com/urfave/cli" @@ -34,6 +34,10 @@ var ( Usage: "Pathname of a directory containing TLS certificates and keys used to connect to the registry", }, cli.BoolTFlag{ + Name: "get-login", + Usage: "Return the current login user for the registry", + }, + cli.BoolTFlag{ Name: "tls-verify", Usage: "Require HTTPS and verify certificates when contacting registries (default: true)", }, @@ -60,27 +64,65 @@ func loginCmd(c *cli.Context) error { if len(args) == 0 { return errors.Errorf("registry must be given") } - server := scrubServer(args[0]) + server := registryFromFullName(scrubServer(args[0])) authfile := getAuthFile(c.String("authfile")) sc := common.GetSystemContext("", authfile, false) + if c.IsSet("get-login") { + user, err := config.GetUserLoggedIn(sc, server) + + if err != nil { + return errors.Wrapf(err, "unable to check for login user") + } + + if user == "" { + return errors.Errorf("not logged into %s", server) + } + + fmt.Printf("%s\n", user) + return nil + } + // username of user logged in to server (if one exists) - userFromAuthFile, err := config.GetUserLoggedIn(sc, server) + userFromAuthFile, passFromAuthFile, err := config.GetAuthentication(sc, server) if err != nil { return errors.Wrapf(err, "error getting logged-in user") } - username, password, err := getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile) + + ctx := getContext() + + var ( + username string + password string + ) + + if userFromAuthFile != "" { + username = userFromAuthFile + password = passFromAuthFile + fmt.Println("Authenticating with existing credentials...") + if err := docker.CheckAuth(ctx, sc, username, password, server); err == nil { + fmt.Println("Existing credentials are valid. Already logged in to", server) + return nil + } + fmt.Println("Existing credentials are invalid, please enter valid username and password") + } + + username, password, err = getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile) if err != nil { return errors.Wrapf(err, "error getting username and password") } - sc.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify") + + if c.IsSet("tls-verify") { + sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) + } if c.String("cert-dir") != "" { sc.DockerCertPath = c.String("cert-dir") } - if err = docker.CheckAuth(context.TODO(), sc, username, password, server); err == nil { - if err := config.SetAuthentication(sc, server, username, password); err != nil { + if err = docker.CheckAuth(ctx, sc, username, password, server); err == nil { + // Write the new credentials to the authfile + if err = config.SetAuthentication(sc, server, username, password); err != nil { return err } } @@ -126,3 +168,14 @@ func getUserAndPass(username, password, userFromAuthFile string) (string, string } return strings.TrimSpace(username), password, err } + +// registryFromFullName gets the registry from the input. 
If the input is of the form +// quay.io/myuser/myimage, it will parse it and just return quay.io +// It also returns true if a full image name was given +func registryFromFullName(input string) string { + split := strings.Split(input, "/") + if len(split) > 1 { + return split[0] + } + return split[0] +} diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go index 84aca5e61..75947c34e 100644 --- a/cmd/podman/logs.go +++ b/cmd/podman/logs.go @@ -40,14 +40,15 @@ var ( logsDescription = "The podman logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution" + "order when combined with podman run (i.e. your run may not have generated any logs at the time you execute podman logs" logsCommand = cli.Command{ - Name: "logs", - Usage: "Fetch the logs of a container", - Description: logsDescription, - Flags: sortFlags(logsFlags), - Action: logsCmd, - ArgsUsage: "CONTAINER", - SkipArgReorder: true, - OnUsageError: usageErrorHandler, + Name: "logs", + Usage: "Fetch the logs of a container", + Description: logsDescription, + Flags: sortFlags(logsFlags), + Action: logsCmd, + ArgsUsage: "CONTAINER", + SkipArgReorder: true, + OnUsageError: usageErrorHandler, + UseShortOptionHandling: true, } ) diff --git a/cmd/podman/main.go b/cmd/podman/main.go index 38eac4504..2db6c5dec 100644 --- a/cmd/podman/main.go +++ b/cmd/podman/main.go @@ -2,13 +2,13 @@ package main import ( "fmt" + "log/syslog" "os" "os/exec" "runtime/pprof" "syscall" "github.com/containers/libpod/libpod" - "github.com/containers/libpod/pkg/hooks" _ "github.com/containers/libpod/pkg/hooks/0.1.0" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/version" @@ -17,7 +17,6 @@ import ( "github.com/sirupsen/logrus" lsyslog "github.com/sirupsen/logrus/hooks/syslog" "github.com/urfave/cli" - "log/syslog" ) // This is populated by the Makefile from the VERSION file @@ -35,8 +34,10 @@ var cmdsNotRequiringRootless = map[string]bool{ // If this change, please also update libpod.refreshRootless() "login": true, "logout": true, + "mount": true, "kill": true, "pause": true, + "restart": true, "run": true, "unpause": true, "search": true, @@ -77,6 +78,7 @@ func main() { infoCommand, inspectCommand, killCommand, + kubeCommand, loadCommand, loginCommand, logoutCommand, @@ -102,6 +104,7 @@ func main() { umountCommand, unpauseCommand, versionCommand, + volumeCommand, waitCommand, } @@ -205,11 +208,9 @@ func main() { Usage: "path to default mounts file", Hidden: true, }, - cli.StringFlag{ - Name: "hooks-dir-path", - Usage: "set the OCI hooks directory path", - Value: hooks.DefaultDir, - Hidden: true, + cli.StringSliceFlag{ + Name: "hooks-dir", + Usage: "set the OCI hooks directory path (may be set multiple times)", }, cli.IntFlag{ Name: "max-workers", diff --git a/cmd/podman/mount.go b/cmd/podman/mount.go index 585f506cd..c91115597 100644 --- a/cmd/podman/mount.go +++ b/cmd/podman/mount.go @@ -3,9 +3,11 @@ package main import ( js "encoding/json" "fmt" + "os" of "github.com/containers/libpod/cmd/podman/formats" "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -52,6 +54,9 @@ func mountCmd(c *cli.Context) error { if err := validateFlags(c, mountFlags); err != nil { return err } + if os.Geteuid() != 0 { + rootless.SetSkipStorageSetup(true) + } runtime, err := libpodruntime.GetRuntime(c) if err != nil { @@ -59,6 +64,22 @@ func mountCmd(c 
*cli.Context) error { } defer runtime.Shutdown(false) + if os.Geteuid() != 0 { + if driver := runtime.GetConfig().StorageConfig.GraphDriverName; driver != "vfs" { + // Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part + // of the mount command. + return fmt.Errorf("cannot mount using driver %s in rootless mode", driver) + } + + became, ret, err := rootless.BecomeRootInUserNS() + if err != nil { + return err + } + if became { + os.Exit(ret) + } + } + formats := map[string]bool{ "": true, of.JSONString: true, diff --git a/cmd/podman/pause.go b/cmd/podman/pause.go index 1e1585216..fcb2f3cb8 100644 --- a/cmd/podman/pause.go +++ b/cmd/podman/pause.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os" "github.com/containers/libpod/cmd/podman/libpodruntime" @@ -37,7 +36,6 @@ var ( func pauseCmd(c *cli.Context) error { var ( - lastError error pauseContainers []*libpod.Container pauseFuncs []shared.ParallelWorkerInput ) @@ -90,17 +88,6 @@ func pauseCmd(c *cli.Context) error { } logrus.Debugf("Setting maximum workers to %d", maxWorkers) - pauseErrors := shared.ParallelExecuteWorkerPool(maxWorkers, pauseFuncs) - - for cid, result := range pauseErrors { - if result != nil { - if len(pauseErrors) > 1 { - fmt.Println(result.Error()) - } - lastError = result - continue - } - fmt.Println(cid) - } - return lastError + pauseErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, pauseFuncs) + return printParallelOutput(pauseErrors, errCount) } diff --git a/cmd/podman/pod.go b/cmd/podman/pod.go index 0c6ec5e8c..a30361134 100644 --- a/cmd/podman/pod.go +++ b/cmd/podman/pod.go @@ -11,6 +11,7 @@ Pods are a group of one or more containers sharing the same network, pid and ipc ` podSubCommands = []cli.Command{ podCreateCommand, + podExistsCommand, podInspectCommand, podKillCommand, podPauseCommand, diff --git a/cmd/podman/pod_create.go b/cmd/podman/pod_create.go index 63fa6b294..967ce7610 100644 --- a/cmd/podman/pod_create.go +++ b/cmd/podman/pod_create.go @@ -8,6 +8,7 @@ import ( "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -58,6 +59,10 @@ var podCreateFlags = []cli.Flag{ Name: "pod-id-file", Usage: "Write the pod ID to the file", }, + cli.StringSliceFlag{ + Name: "publish, p", + Usage: "Publish a container's port, or a range of ports, to the host (default [])", + }, cli.StringFlag{ Name: "share", Usage: "A comma delimited list of kernel namespaces the pod will share", @@ -102,6 +107,16 @@ func podCreateCmd(c *cli.Context) error { defer podIdFile.Close() defer podIdFile.Sync() } + + if len(c.StringSlice("publish")) > 0 { + if !c.BoolT("infra") { + return errors.Errorf("you must have an infra container to publish port bindings to the host") + } + if rootless.IsRootless() { + return errors.Errorf("rootless networking does not allow port binding to the host") + } + } + if !c.BoolT("infra") && c.IsSet("share") && c.String("share") != "none" && c.String("share") != "" { return errors.Errorf("You cannot share kernel namespaces on the pod level without an infra container") } @@ -131,6 +146,14 @@ func podCreateCmd(c *cli.Context) error { options = append(options, nsOptions...) 
} + if len(c.StringSlice("publish")) > 0 { + portBindings, err := shared.CreatePortBindings(c.StringSlice("publish")) + if err != nil { + return err + } + options = append(options, libpod.WithInfraContainerPorts(portBindings)) + + } // always have containers use pod cgroups // User Opt out is not yet supported options = append(options, libpod.WithPodCgroups()) diff --git a/cmd/podman/pod_stop.go b/cmd/podman/pod_stop.go index 14114aa11..d49ba8a00 100644 --- a/cmd/podman/pod_stop.go +++ b/cmd/podman/pod_stop.go @@ -2,7 +2,6 @@ package main import ( "fmt" - "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -16,6 +15,10 @@ var ( Usage: "stop all running pods", }, LatestPodFlag, + cli.UintFlag{ + Name: "timeout, time, t", + Usage: "Seconds to wait for pod stop before killing the container", + }, } podStopDescription = ` podman pod stop @@ -35,6 +38,7 @@ var ( ) func podStopCmd(c *cli.Context) error { + timeout := -1 if err := checkMutuallyExclusiveFlags(c); err != nil { return err } @@ -52,9 +56,12 @@ func podStopCmd(c *cli.Context) error { ctx := getContext() + if c.IsSet("timeout") { + timeout = int(c.Uint("timeout")) + } for _, pod := range pods { // set cleanup to true to clean mounts and namespaces - ctr_errs, err := pod.Stop(ctx, true) + ctr_errs, err := pod.StopWithTimeout(ctx, true, timeout) if ctr_errs != nil { for ctr, err := range ctr_errs { if lastError != nil { diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go index 83274c9a8..7a4a80769 100644 --- a/cmd/podman/ps.go +++ b/cmd/podman/ps.go @@ -184,7 +184,7 @@ var ( Usage: "Display the extended information", }, cli.BoolFlag{ - Name: "pod", + Name: "pod, p", Usage: "Print the ID and name of the pod the containers are associated with", }, cli.BoolFlag{ @@ -200,6 +200,10 @@ var ( Usage: "Sort output by command, created, id, image, names, runningfor, size, or status", Value: "created", }, + cli.BoolFlag{ + Name: "sync", + Usage: "Sync container state with OCI runtime", + }, } psDescription = "Prints out information about the containers" psCommand = cli.Command{ @@ -260,6 +264,7 @@ func psCmd(c *cli.Context) error { Size: c.Bool("size"), Namespace: c.Bool("namespace"), Sort: c.String("sort"), + Sync: c.Bool("sync"), } filters := c.StringSlice("filter") diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go index 8fb3971bd..47130805e 100644 --- a/cmd/podman/pull.go +++ b/cmd/podman/pull.go @@ -64,7 +64,6 @@ specified, the image with the 'latest' tag (if it exists) is pulled // pullCmd gets the data from the command line and calls pullImage // to copy an image from a registry to a local machine func pullCmd(c *cli.Context) error { - forceSecure := false runtime, err := libpodruntime.GetRuntime(c) if err != nil { return errors.Wrapf(err, "could not get runtime") @@ -104,12 +103,11 @@ func pullCmd(c *cli.Context) error { } dockerRegistryOptions := image2.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: c.String("cert-dir"), - DockerInsecureSkipTLSVerify: !c.BoolT("tls-verify"), + DockerRegistryCreds: registryCreds, + DockerCertPath: c.String("cert-dir"), } if c.IsSet("tls-verify") { - forceSecure = c.Bool("tls-verify") + dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) } // Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead @@ -125,7 +123,7 @@ func pullCmd(c *cli.Context) error { imgID = newImage[0].ID() } else { authfile := getAuthFile(c.String("authfile")) - 
newImage, err := runtime.ImageRuntime().New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true, forceSecure) + newImage, err := runtime.ImageRuntime().New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true) if err != nil { return errors.Wrapf(err, "error pulling image %q", image) } diff --git a/cmd/podman/push.go b/cmd/podman/push.go index 331f92cd2..82589f3f1 100644 --- a/cmd/podman/push.go +++ b/cmd/podman/push.go @@ -81,7 +81,6 @@ func pushCmd(c *cli.Context) error { var ( registryCreds *types.DockerAuthConfig destName string - forceSecure bool ) args := c.Args() @@ -108,7 +107,6 @@ func pushCmd(c *cli.Context) error { } certPath := c.String("cert-dir") - skipVerify := !c.BoolT("tls-verify") removeSignatures := c.Bool("remove-signatures") signBy := c.String("sign-by") @@ -145,14 +143,12 @@ func pushCmd(c *cli.Context) error { } } - if c.IsSet("tls-verify") { - forceSecure = c.Bool("tls-verify") - } - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: certPath, - DockerInsecureSkipTLSVerify: skipVerify, + DockerRegistryCreds: registryCreds, + DockerCertPath: certPath, + } + if c.IsSet("tls-verify") { + dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) } so := image.SigningOptions{ @@ -167,5 +163,5 @@ func pushCmd(c *cli.Context) error { authfile := getAuthFile(c.String("authfile")) - return newImage.PushImageToHeuristicDestination(getContext(), destName, manifestType, authfile, c.String("signature-policy"), writer, c.Bool("compress"), so, &dockerRegistryOptions, forceSecure, nil) + return newImage.PushImageToHeuristicDestination(getContext(), destName, manifestType, authfile, c.String("signature-policy"), writer, c.Bool("compress"), so, &dockerRegistryOptions, nil) } diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go index 2e264db79..c6fe1025a 100644 --- a/cmd/podman/restart.go +++ b/cmd/podman/restart.go @@ -2,10 +2,12 @@ package main import ( "fmt" + "os" "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -46,10 +48,13 @@ func restartCmd(c *cli.Context) error { var ( restartFuncs []shared.ParallelWorkerInput containers []*libpod.Container - lastError error restartContainers []*libpod.Container ) + if os.Geteuid() != 0 { + rootless.SetSkipStorageSetup(true) + } + args := c.Args() runOnly := c.Bool("running") all := c.Bool("all") @@ -98,6 +103,29 @@ func restartCmd(c *cli.Context) error { } } + maxWorkers := shared.Parallelize("restart") + if c.GlobalIsSet("max-workers") { + maxWorkers = c.GlobalInt("max-workers") + } + + logrus.Debugf("Setting maximum workers to %d", maxWorkers) + + if rootless.IsRootless() { + // With rootless containers we cannot really restart an existing container + // as we would need to join the mount namespace as well to be able to reuse + // the storage. + if err := stopRootlessContainers(restartContainers, timeout, useTimeout, maxWorkers); err != nil { + return err + } + became, ret, err := rootless.BecomeRootInUserNS() + if err != nil { + return err + } + if became { + os.Exit(ret) + } + } + // We now have a slice of all the containers to be restarted. 
Iterate them to // create restart Funcs with a timeout as needed for _, ctr := range restartContainers { @@ -117,22 +145,49 @@ func restartCmd(c *cli.Context) error { }) } - maxWorkers := shared.Parallelize("restart") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalInt("max-workers") - } + restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs) + return printParallelOutput(restartErrors, errCount) +} - logrus.Debugf("Setting maximum workers to %d", maxWorkers) +func stopRootlessContainers(stopContainers []*libpod.Container, timeout uint, useTimeout bool, maxWorkers int) error { + var stopFuncs []shared.ParallelWorkerInput + for _, ctr := range stopContainers { + state, err := ctr.State() + if err != nil { + return err + } + if state != libpod.ContainerStateRunning { + continue + } + + ctrTimeout := ctr.StopTimeout() + if useTimeout { + ctrTimeout = timeout + } - restartErrors := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs) + c := ctr + f := func() error { + return c.StopWithTimeout(ctrTimeout) + } - for cid, result := range restartErrors { - if result != nil { - fmt.Println(result.Error()) - lastError = result - continue + stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{ + ContainerID: c.ID(), + ParallelFunc: f, + }) + + restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs) + var lastError error + for _, result := range restartErrors { + if result != nil { + if errCount > 1 { + fmt.Println(result.Error()) + } + lastError = result + } + } + if lastError != nil { + return lastError } - fmt.Println(cid) } - return lastError + return nil } diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go index 067a2b5d4..bc2a71ba0 100644 --- a/cmd/podman/restore.go +++ b/cmd/podman/restore.go @@ -27,6 +27,10 @@ var ( // dedicated state for container which are checkpointed. // TODO: add ContainerStateCheckpointed cli.BoolFlag{ + Name: "tcp-established", + Usage: "checkpoint a container with established TCP connections", + }, + cli.BoolFlag{ Name: "all, a", Usage: "restore all checkpointed containers", }, @@ -53,16 +57,19 @@ func restoreCmd(c *cli.Context) error { } defer runtime.Shutdown(false) - keep := c.Bool("keep") + options := libpod.ContainerCheckpointOptions{ + Keep: c.Bool("keep"), + TCPEstablished: c.Bool("tcp-established"), + } if err := checkAllAndLatest(c); err != nil { return err } - containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "checkpointed") + containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateExited, "checkpointed") for _, ctr := range containers { - if err = ctr.Restore(context.TODO(), keep); err != nil { + if err = ctr.Restore(context.TODO(), options); err != nil { if lastError != nil { fmt.Fprintln(os.Stderr, lastError) } diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go index 0fb5345ee..7c0569b78 100644 --- a/cmd/podman/rm.go +++ b/cmd/podman/rm.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -46,9 +45,7 @@ Running containers will not be removed without the -f option. 
// saveCmd saves the image to either docker-archive or oci func rmCmd(c *cli.Context) error { var ( - delContainers []*libpod.Container - lastError error - deleteFuncs []shared.ParallelWorkerInput + deleteFuncs []shared.ParallelWorkerInput ) ctx := getContext() @@ -65,7 +62,13 @@ func rmCmd(c *cli.Context) error { return err } - delContainers, lastError = getAllOrLatestContainers(c, runtime, -1, "all") + delContainers, err := getAllOrLatestContainers(c, runtime, -1, "all") + if err != nil { + if len(delContainers) == 0 { + return err + } + fmt.Println(err.Error()) + } for _, container := range delContainers { con := container @@ -84,14 +87,7 @@ func rmCmd(c *cli.Context) error { } logrus.Debugf("Setting maximum workers to %d", maxWorkers) - deleteErrors := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs) - for cid, result := range deleteErrors { - if result != nil { - fmt.Println(result.Error()) - lastError = result - continue - } - fmt.Println(cid) - } - return lastError + // Run the parallel funcs + deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs) + return printParallelOutput(deleteErrors, errCount) } diff --git a/cmd/podman/rmi.go b/cmd/podman/rmi.go index c0a0d69df..910c7ba35 100644 --- a/cmd/podman/rmi.go +++ b/cmd/podman/rmi.go @@ -46,6 +46,13 @@ var ( ) func rmiCmd(c *cli.Context) error { + var ( + lastError error + deleted bool + deleteErr error + msg string + ) + ctx := getContext() if err := validateFlags(c, rmiFlags); err != nil { return err @@ -66,20 +73,18 @@ func rmiCmd(c *cli.Context) error { } images := args[:] - var lastError error - var deleted bool removeImage := func(img *image.Image) { deleted = true - msg, err := runtime.RemoveImage(ctx, img, c.Bool("force")) - if err != nil { - if errors.Cause(err) == storage.ErrImageUsedByContainer { + msg, deleteErr = runtime.RemoveImage(ctx, img, c.Bool("force")) + if deleteErr != nil { + if errors.Cause(deleteErr) == storage.ErrImageUsedByContainer { fmt.Printf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID()) } if lastError != nil { fmt.Fprintln(os.Stderr, lastError) } - lastError = err + lastError = deleteErr } else { fmt.Println(msg) } @@ -91,8 +96,31 @@ func rmiCmd(c *cli.Context) error { if err != nil { return errors.Wrapf(err, "unable to query local images") } - for _, i := range imagesToDelete { - removeImage(i) + lastNumberofImages := 0 + for len(imagesToDelete) > 0 { + if lastNumberofImages == len(imagesToDelete) { + return errors.New("unable to delete all images; re-run the rmi command again.") + } + for _, i := range imagesToDelete { + isParent, err := i.IsParent() + if err != nil { + return err + } + if isParent { + continue + } + removeImage(i) + } + lastNumberofImages = len(imagesToDelete) + imagesToDelete, err = runtime.ImageRuntime().GetImages() + if err != nil { + return err + } + // If no images are left to delete or there is just one image left and it cannot be deleted, + // lets break out and display the error + if len(imagesToDelete) == 0 || (lastNumberofImages == 1 && lastError != nil) { + break + } } } else { // Create image.image objects for deletion from user input. 
diff --git a/cmd/podman/run.go b/cmd/podman/run.go index af6ced45d..20cb85347 100644 --- a/cmd/podman/run.go +++ b/cmd/podman/run.go @@ -44,7 +44,7 @@ func runCmd(c *cli.Context) error { rootless.SetSkipStorageSetup(true) } - runtime, err := libpodruntime.GetContainerRuntime(c) + runtime, err := libpodruntime.GetRuntime(c) if err != nil { return errors.Wrapf(err, "error creating libpod runtime") } @@ -116,6 +116,11 @@ func runCmd(c *cli.Context) error { if strings.Index(err.Error(), "permission denied") > -1 { exitCode = 126 } + if c.IsSet("rm") { + if deleteError := runtime.RemoveContainer(ctx, ctr, true); deleteError != nil { + logrus.Errorf("unable to remove container %s after failing to start and attach to it", ctr.ID()) + } + } return err } diff --git a/cmd/podman/runlabel.go b/cmd/podman/runlabel.go index e1dee1fb2..48a296260 100644 --- a/cmd/podman/runlabel.go +++ b/cmd/podman/runlabel.go @@ -10,7 +10,6 @@ import ( "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/util" "github.com/containers/libpod/utils" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -94,7 +93,7 @@ func runlabelCmd(c *cli.Context) error { imageName string stdErr, stdOut io.Writer stdIn io.Reader - newImage *image.Image + extraArgs []string ) // Evil images could trick into recursively executing the runlabel @@ -124,6 +123,9 @@ func runlabelCmd(c *cli.Context) error { return errors.Errorf("the display and quiet flags cannot be used together.") } + if len(args) > 2 { + extraArgs = args[2:] + } pull := c.Bool("pull") label := args[0] @@ -151,75 +153,26 @@ func runlabelCmd(c *cli.Context) error { stdIn = nil } - if pull { - var registryCreds *types.DockerAuthConfig - if c.IsSet("creds") { - creds, err := util.ParseRegistryCreds(c.String("creds")) - if err != nil { - return err - } - registryCreds = creds - } - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: c.String("cert-dir"), - DockerInsecureSkipTLSVerify: !c.BoolT("tls-verify"), - } - authfile := getAuthFile(c.String("authfile")) - - newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, c.String("signature-policy"), authfile, stdOut, &dockerRegistryOptions, image.SigningOptions{}, false, false) - } else { - newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage) + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerCertPath: c.String("cert-dir"), } - if err != nil { - return errors.Wrapf(err, "unable to find image") + if c.IsSet("tls-verify") { + dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) } - if len(newImage.Names()) < 1 { - imageName = newImage.ID() - } else { - imageName = newImage.Names()[0] - } - - runLabel, err := newImage.GetLabel(ctx, label) + authfile := getAuthFile(c.String("authfile")) + runLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, pull, c.String("creds"), dockerRegistryOptions, authfile, c.String("signature-policy"), stdOut) if err != nil { return err } - - // If no label to execute, we return if runLabel == "" { return nil } - // The user provided extra arguments that need to be tacked onto the label's command - if len(args) > 2 { - runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(args[2:], " ")) - } - - cmd, err := shared.GenerateCommand(runLabel, imageName, c.String("name")) + cmd, env, err := 
shared.GenerateRunlabelCommand(runLabel, imageName, c.String("name"), opts, extraArgs) if err != nil { - return errors.Wrapf(err, "unable to generate command") - } - env := shared.GenerateRunEnvironment(c.String("name"), imageName, opts) - env = append(env, "PODMAN_RUNLABEL_NESTED=1") - - envmap := envSliceToMap(env) - - envmapper := func(k string) string { - switch k { - case "OPT1": - return envmap["OPT1"] - case "OPT2": - return envmap["OPT2"] - case "OPT3": - return envmap["OPT3"] - } - return "" + return err } - - newS := os.Expand(strings.Join(cmd, " "), envmapper) - cmd = strings.Split(newS, " ") - if !c.Bool("quiet") { fmt.Printf("Command: %s\n", strings.Join(cmd, " ")) if c.Bool("display") { @@ -228,12 +181,3 @@ func runlabelCmd(c *cli.Context) error { } return utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...) } - -func envSliceToMap(env []string) map[string]string { - m := make(map[string]string) - for _, i := range env { - split := strings.Split(i, "=") - m[split[0]] = strings.Join(split[1:], " ") - } - return m -} diff --git a/cmd/podman/save.go b/cmd/podman/save.go index 7edc42e0d..139f3918a 100644 --- a/cmd/podman/save.go +++ b/cmd/podman/save.go @@ -146,7 +146,7 @@ func saveCmd(c *cli.Context) error { return err } } - if err := newImage.PushImageToReference(getContext(), destRef, manifestType, "", "", writer, c.Bool("compress"), libpodImage.SigningOptions{}, &libpodImage.DockerRegistryOptions{}, false, additionaltags); err != nil { + if err := newImage.PushImageToReference(getContext(), destRef, manifestType, "", "", writer, c.Bool("compress"), libpodImage.SigningOptions{}, &libpodImage.DockerRegistryOptions{}, additionaltags); err != nil { if err2 := os.Remove(output); err2 != nil { logrus.Errorf("error deleting %q: %v", output, err) } diff --git a/cmd/podman/search.go b/cmd/podman/search.go index fa11dad32..442ebb57f 100644 --- a/cmd/podman/search.go +++ b/cmd/podman/search.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/containers/image/docker" + "github.com/containers/image/types" "github.com/containers/libpod/cmd/podman/formats" "github.com/containers/libpod/libpod/common" sysreg "github.com/containers/libpod/pkg/registries" @@ -72,11 +73,12 @@ type searchParams struct { } type searchOpts struct { - filter []string - limit int - noTrunc bool - format string - authfile string + filter []string + limit int + noTrunc bool + format string + authfile string + insecureSkipTLSVerify types.OptionalBool } type searchFilterParams struct { @@ -116,7 +118,10 @@ func searchCmd(c *cli.Context) error { filter: c.StringSlice("filter"), authfile: getAuthFile(c.String("authfile")), } - regAndSkipTLS, err := getRegistriesAndSkipTLS(c, registry) + if c.IsSet("tls-verify") { + opts.insecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) + } + registries, err := getRegistries(registry) if err != nil { return err } @@ -126,7 +131,7 @@ func searchCmd(c *cli.Context) error { return err } - return generateSearchOutput(term, regAndSkipTLS, opts, *filter) + return generateSearchOutput(term, registries, opts, *filter) } func genSearchFormat(format string) string { @@ -157,16 +162,8 @@ func (s *searchParams) headerMap() map[string]string { return values } -// A function for finding which registries can skip TLS -func getRegistriesAndSkipTLS(c *cli.Context, registry string) (map[string]bool, error) { - // Variables for setting up Registry and TLSVerify - tlsVerify := c.BoolT("tls-verify") - forceSecure := false - - if c.IsSet("tls-verify") { - forceSecure = 
c.BoolT("tls-verify") - } - +// getRegistries returns the list of registries to search, depending on an optional registry specification +func getRegistries(registry string) ([]string, error) { var registries []string if registry != "" { registries = append(registries, registry) @@ -177,35 +174,10 @@ func getRegistriesAndSkipTLS(c *cli.Context, registry string) (map[string]bool, return nil, errors.Wrapf(err, "error getting registries to search") } } - regAndSkipTLS := make(map[string]bool) - // If tls-verify is set to false, allow insecure always. - if !tlsVerify { - for _, reg := range registries { - regAndSkipTLS[reg] = true - } - } else { - // initially set all registries to verify with TLS - for _, reg := range registries { - regAndSkipTLS[reg] = false - } - // if the user didn't allow nor disallow insecure registries, check to see if the registry is insecure - if !forceSecure { - insecureRegistries, err := sysreg.GetInsecureRegistries() - if err != nil { - return nil, errors.Wrapf(err, "error getting insecure registries to search") - } - for _, reg := range insecureRegistries { - // if there are any insecure registries in registries, allow for HTTP - if _, ok := regAndSkipTLS[reg]; ok { - regAndSkipTLS[reg] = true - } - } - } - } - return regAndSkipTLS, nil + return registries, nil } -func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts, filter searchFilterParams) ([]searchParams, error) { +func getSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) ([]searchParams, error) { // Max number of queries by default is 25 limit := maxQueries if opts.limit != 0 { @@ -213,10 +185,10 @@ func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts } sc := common.GetSystemContext("", opts.authfile, false) + sc.DockerInsecureSkipTLSVerify = opts.insecureSkipTLSVerify + sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place. 
var paramsArr []searchParams - for reg, skipTLS := range regAndSkipTLS { - // set the SkipTLSVerify bool depending on the registry being searched through - sc.DockerInsecureSkipTLSVerify = skipTLS + for _, reg := range registries { results, err := docker.SearchRegistry(context.TODO(), sc, reg, term, limit) if err != nil { logrus.Errorf("error searching registry %q: %v", reg, err) @@ -276,8 +248,8 @@ func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts return paramsArr, nil } -func generateSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts, filter searchFilterParams) error { - searchOutput, err := getSearchOutput(term, regAndSkipTLS, opts, filter) +func generateSearchOutput(term string, registries []string, opts searchOpts, filter searchFilterParams) error { + searchOutput, err := getSearchOutput(term, registries, opts, filter) if err != nil { return err } diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go index 4404268d4..6236d19b4 100644 --- a/cmd/podman/shared/container.go +++ b/cmd/podman/shared/container.go @@ -1,10 +1,10 @@ package shared import ( + "context" "encoding/json" "fmt" - "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/docker/go-units" + "io" "os" "path/filepath" "regexp" @@ -13,9 +13,14 @@ import ( "sync" "time" + "github.com/containers/image/types" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/inspect" cc "github.com/containers/libpod/pkg/spec" + "github.com/containers/libpod/pkg/util" + "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/docker/go-units" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -40,6 +45,7 @@ type PsOptions struct { Sort string Label string Namespace bool + Sync bool } // BatchContainerStruct is the return obkect from BatchContainer and contains @@ -121,6 +127,12 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput pso PsContainerOutput ) batchErr := ctr.Batch(func(c *libpod.Container) error { + if opts.Sync { + if err := c.Sync(); err != nil { + return err + } + } + conState, err = c.State() if err != nil { return errors.Wrapf(err, "unable to obtain container state") @@ -589,3 +601,79 @@ func portsToString(ports []ocicni.PortMapping) string { } return strings.Join(portDisplay, ", ") } + +// GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the +// contruction of the runlabel output and environment variables +func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) { + var ( + newImage *image.Image + err error + imageName string + ) + if pull { + var registryCreds *types.DockerAuthConfig + if inputCreds != "" { + creds, err := util.ParseRegistryCreds(inputCreds) + if err != nil { + return "", "", err + } + registryCreds = creds + } + dockerRegistryOptions.DockerRegistryCreds = registryCreds + newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, signaturePolicyPath, authfile, output, &dockerRegistryOptions, image.SigningOptions{}, false) + } else { + newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage) + } + if err != nil { + return "", "", errors.Wrapf(err, "unable to find image") + } + + if len(newImage.Names()) < 1 { + imageName = newImage.ID() + 
} else { + imageName = newImage.Names()[0] + } + + runLabel, err := newImage.GetLabel(ctx, label) + return runLabel, imageName, err +} + +// GenerateRunlabelCommand generates the command that will eventually be execucted by podman +func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string) ([]string, []string, error) { + // The user provided extra arguments that need to be tacked onto the label's command + if len(extraArgs) > 0 { + runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " ")) + } + cmd, err := GenerateCommand(runLabel, imageName, name) + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to generate command") + } + env := GenerateRunEnvironment(name, imageName, opts) + env = append(env, "PODMAN_RUNLABEL_NESTED=1") + + envmap := envSliceToMap(env) + + envmapper := func(k string) string { + switch k { + case "OPT1": + return envmap["OPT1"] + case "OPT2": + return envmap["OPT2"] + case "OPT3": + return envmap["OPT3"] + } + return "" + } + newS := os.Expand(strings.Join(cmd, " "), envmapper) + cmd = strings.Split(newS, " ") + return cmd, env, nil +} + +func envSliceToMap(env []string) map[string]string { + m := make(map[string]string) + for _, i := range env { + split := strings.Split(i, "=") + m[split[0]] = strings.Join(split[1:], " ") + } + return m +} diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go index a92e0d547..8770b8ec0 100644 --- a/cmd/podman/shared/funcs.go +++ b/cmd/podman/shared/funcs.go @@ -5,13 +5,28 @@ import ( "os" "path/filepath" "strings" + + "github.com/google/shlex" ) func substituteCommand(cmd string) (string, error) { + var ( + newCommand string + ) + + // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being + // used. If "/usr/bin/docker" is provided, we also sub in podman. + // Otherwise, leave the command unchanged. + if cmd == "podman" || filepath.Base(cmd) == "docker" { + newCommand = "/proc/self/exe" + } else { + newCommand = cmd + } + // If cmd is an absolute or relative path, check if the file exists. // Throw an error if it doesn't exist. - if strings.Contains(cmd, "/") || strings.HasPrefix(cmd, ".") { - res, err := filepath.Abs(cmd) + if strings.Contains(newCommand, "/") || strings.HasPrefix(newCommand, ".") { + res, err := filepath.Abs(newCommand) if err != nil { return "", err } @@ -22,16 +37,7 @@ func substituteCommand(cmd string) (string, error) { } } - // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being - // used. Otherwise, leave the command unchanged. 
- switch cmd { - case "podman": - fallthrough - case "docker": - return "/proc/self/exe", nil - default: - return cmd, nil - } + return newCommand, nil } // GenerateCommand takes a label (string) and converts it to an executable command @@ -42,7 +48,11 @@ func GenerateCommand(command, imageName, name string) ([]string, error) { if name == "" { name = imageName } - cmd := strings.Split(command, " ") + + cmd, err := shlex.Split(command) + if err != nil { + return nil, err + } prog, err := substituteCommand(cmd[0]) if err != nil { diff --git a/cmd/podman/shared/funcs_test.go b/cmd/podman/shared/funcs_test.go index 596df84e8..7506b9d9c 100644 --- a/cmd/podman/shared/funcs_test.go +++ b/cmd/podman/shared/funcs_test.go @@ -18,10 +18,11 @@ var ( ) func TestGenerateCommand(t *testing.T) { - inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo install" - correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo install" + inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo \"hello world\"" + correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo hello world" newCommand, err := GenerateCommand(inputCommand, "foo", "bar") assert.Nil(t, err) + assert.Equal(t, "hello world", newCommand[11]) assert.Equal(t, correctCommand, strings.Join(newCommand, " ")) } @@ -108,8 +109,8 @@ func TestGenerateCommandNoSetName(t *testing.T) { } func TestGenerateCommandNoName(t *testing.T) { - inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install" - correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install" + inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install" + correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install" newCommand, err := GenerateCommand(inputCommand, "foo", "") assert.Nil(t, err) assert.Equal(t, correctCommand, strings.Join(newCommand, " ")) diff --git a/cmd/podman/shared/parallel.go b/cmd/podman/shared/parallel.go index 633781a45..e6ce50f95 100644 --- a/cmd/podman/shared/parallel.go +++ b/cmd/podman/shared/parallel.go @@ -30,9 +30,10 @@ func ParallelWorker(wg *sync.WaitGroup, jobs <-chan ParallelWorkerInput, results // ParallelExecuteWorkerPool takes container jobs and performs them in parallel. The worker // int determines how many workers/threads should be premade. 
-func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) map[string]error { +func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) (map[string]error, int) { var ( - wg sync.WaitGroup + wg sync.WaitGroup + errorCount int ) resultChan := make(chan containerError, len(functions)) @@ -62,9 +63,12 @@ func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) map close(resultChan) for ctrError := range resultChan { results[ctrError.ContainerID] = ctrError.Err + if ctrError.Err != nil { + errorCount += 1 + } } - return results + return results, errorCount } // Parallelize provides the maximum number of parallel workers (int) as calculated by a basic diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go index 4e8e58c4d..30dd14845 100644 --- a/cmd/podman/shared/pod.go +++ b/cmd/podman/shared/pod.go @@ -1,7 +1,11 @@ package shared import ( + "strconv" + "github.com/containers/libpod/libpod" + "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/docker/go-connections/nat" "github.com/pkg/errors" ) @@ -95,3 +99,36 @@ func GetNamespaceOptions(ns []string) ([]libpod.PodCreateOption, error) { } return options, nil } + +// CreatePortBindings iterates ports mappings and exposed ports into a format CNI understands +func CreatePortBindings(ports []string) ([]ocicni.PortMapping, error) { + var portBindings []ocicni.PortMapping + // The conversion from []string to natBindings is temporary while mheon reworks the port + // deduplication code. Eventually that step will not be required. + _, natBindings, err := nat.ParsePortSpecs(ports) + if err != nil { + return nil, err + } + for containerPb, hostPb := range natBindings { + var pm ocicni.PortMapping + pm.ContainerPort = int32(containerPb.Int()) + for _, i := range hostPb { + var hostPort int + var err error + pm.HostIP = i.HostIP + if i.HostPort == "" { + hostPort = containerPb.Int() + } else { + hostPort, err = strconv.Atoi(i.HostPort) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert host port to integer") + } + } + + pm.HostPort = int32(hostPort) + pm.Protocol = containerPb.Proto() + portBindings = append(portBindings, pm) + } + } + return portBindings, nil +} diff --git a/cmd/podman/shared/prune.go b/cmd/podman/shared/prune.go new file mode 100644 index 000000000..90cfe4475 --- /dev/null +++ b/cmd/podman/shared/prune.go @@ -0,0 +1,24 @@ +package shared + +import ( + "fmt" + "github.com/pkg/errors" + + "github.com/containers/libpod/libpod/image" +) + +// Prune removes all unnamed and unused images from the local store +func Prune(ir *image.Runtime) error { + pruneImages, err := ir.GetPruneImages() + if err != nil { + return err + } + + for _, i := range pruneImages { + if err := i.Remove(true); err != nil { + return errors.Wrapf(err, "failed to remove %s", i.ID()) + } + fmt.Println(i.ID()) + } + return nil +} diff --git a/cmd/podman/start.go b/cmd/podman/start.go index 8cf85405e..8bb386c68 100644 --- a/cmd/podman/start.go +++ b/cmd/podman/start.go @@ -1,11 +1,13 @@ package main import ( + "encoding/json" "fmt" "os" "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/libpod" + cc "github.com/containers/libpod/pkg/spec" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -132,6 +134,18 @@ func startCmd(c *cli.Context) error { } // Handle non-attach start if err := ctr.Start(ctx); err != nil { + var createArtifact cc.CreateConfig + artifact, artifactErr := ctr.GetArtifact("create-config") + if artifactErr == nil 
{ + if jsonErr := json.Unmarshal(artifact, &createArtifact); jsonErr != nil { + logrus.Errorf("unable to detect if container %s should be deleted", ctr.ID()) + } + if createArtifact.Rm { + if rmErr := runtime.RemoveContainer(ctx, ctr, true); rmErr != nil { + logrus.Errorf("unable to remove container %s after it failed to start", ctr.ID()) + } + } + } if lastError != nil { fmt.Fprintln(os.Stderr, lastError) } diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go index cb36fd5cd..ade51705e 100644 --- a/cmd/podman/stop.go +++ b/cmd/podman/stop.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" @@ -59,7 +60,13 @@ func stopCmd(c *cli.Context) error { } defer runtime.Shutdown(false) - containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running") + containers, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running") + if err != nil { + if len(containers) == 0 { + return err + } + fmt.Println(err.Error()) + } var stopFuncs []shared.ParallelWorkerInput for _, ctr := range containers { @@ -71,7 +78,11 @@ func stopCmd(c *cli.Context) error { stopTimeout = ctr.StopTimeout() } f := func() error { - return con.StopWithTimeout(stopTimeout) + if err := con.StopWithTimeout(stopTimeout); err != nil && errors.Cause(err) != libpod.ErrCtrStopped { + return err + } + return nil + } stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{ ContainerID: con.ID(), @@ -85,17 +96,6 @@ func stopCmd(c *cli.Context) error { } logrus.Debugf("Setting maximum workers to %d", maxWorkers) - stopErrors := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs) - - for cid, result := range stopErrors { - if result != nil && result != libpod.ErrCtrStopped { - if len(stopErrors) > 1 { - fmt.Println(result.Error()) - } - lastError = result - continue - } - fmt.Println(cid) - } - return lastError + stopErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs) + return printParallelOutput(stopErrors, errCount) } diff --git a/cmd/podman/trust.go b/cmd/podman/trust.go new file mode 100644 index 000000000..7c404cd3f --- /dev/null +++ b/cmd/podman/trust.go @@ -0,0 +1,293 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "sort" + + "github.com/containers/image/types" + "github.com/containers/libpod/cmd/podman/formats" + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/trust" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +var ( + setTrustFlags = []cli.Flag{ + cli.StringFlag{ + Name: "type, t", + Usage: "Trust type, accept values: signedBy(default), accept, reject.", + Value: "signedBy", + }, + cli.StringSliceFlag{ + Name: "pubkeysfile, f", + Usage: `Path of installed public key(s) to trust for TARGET. + Absolute path to keys is added to policy.json. May + used multiple times to define multiple public keys. 
+ File(s) must exist before using this command.`, + }, + cli.StringFlag{ + Name: "policypath", + Hidden: true, + }, + } + showTrustFlags = []cli.Flag{ + cli.BoolFlag{ + Name: "raw", + Usage: "Output raw policy file", + }, + cli.BoolFlag{ + Name: "json, j", + Usage: "Output as json", + }, + cli.StringFlag{ + Name: "policypath", + Hidden: true, + }, + cli.StringFlag{ + Name: "registrypath", + Hidden: true, + }, + } + + setTrustDescription = "Set default trust policy or add a new trust policy for a registry" + setTrustCommand = cli.Command{ + Name: "set", + Usage: "Set default trust policy or a new trust policy for a registry", + Description: setTrustDescription, + Flags: sortFlags(setTrustFlags), + ArgsUsage: "default | REGISTRY[/REPOSITORY]", + Action: setTrustCmd, + OnUsageError: usageErrorHandler, + } + + showTrustDescription = "Display trust policy for the system" + showTrustCommand = cli.Command{ + Name: "show", + Usage: "Display trust policy for the system", + Description: showTrustDescription, + Flags: sortFlags(showTrustFlags), + Action: showTrustCmd, + ArgsUsage: "", + UseShortOptionHandling: true, + OnUsageError: usageErrorHandler, + } + + trustSubCommands = []cli.Command{ + setTrustCommand, + showTrustCommand, + } + + trustDescription = fmt.Sprintf(`Manages the trust policy of the host system. (%s) + Trust policy describes a registry scope that must be signed by public keys.`, getDefaultPolicyPath()) + trustCommand = cli.Command{ + Name: "trust", + Usage: "Manage container image trust policy", + Description: trustDescription, + ArgsUsage: "{set,show} ...", + Subcommands: trustSubCommands, + OnUsageError: usageErrorHandler, + } +) + +func showTrustCmd(c *cli.Context) error { + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not create runtime") + } + + var ( + policyPath string + systemRegistriesDirPath string + ) + if c.IsSet("policypath") { + policyPath = c.String("policypath") + } else { + policyPath = trust.DefaultPolicyPath(runtime.SystemContext()) + } + policyContent, err := ioutil.ReadFile(policyPath) + if err != nil { + return errors.Wrapf(err, "unable to read %s", policyPath) + } + if c.IsSet("registrypath") { + systemRegistriesDirPath = c.String("registrypath") + } else { + systemRegistriesDirPath = trust.RegistriesDirPath(runtime.SystemContext()) + } + + if c.Bool("raw") { + _, err := os.Stdout.Write(policyContent) + if err != nil { + return errors.Wrap(err, "could not read trust policies") + } + return nil + } + + var policyContentStruct trust.PolicyContent + if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { + return errors.Errorf("could not read trust policies") + } + policyJSON, err := trust.GetPolicyJSON(policyContentStruct, systemRegistriesDirPath) + if err != nil { + return errors.Wrapf(err, "error reading registry config file") + } + if c.Bool("json") { + var outjson interface{} + outjson = policyJSON + out := formats.JSONStruct{Output: outjson} + return formats.Writer(out).Out() + } + + sortedRepos := sortPolicyJSONKey(policyJSON) + type policydefault struct { + Repo string + Trusttype string + GPGid string + Sigstore string + } + var policyoutput []policydefault + for _, repo := range sortedRepos { + repoval := policyJSON[repo] + var defaultstruct policydefault + defaultstruct.Repo = repo + if repoval["type"] != nil { + defaultstruct.Trusttype = trustTypeDescription(repoval["type"].(string)) + } + if repoval["keys"] != nil && len(repoval["keys"].([]string)) > 0 { + defaultstruct.GPGid = 
trust.GetGPGId(repoval["keys"].([]string)) + } + if repoval["sigstore"] != nil { + defaultstruct.Sigstore = repoval["sigstore"].(string) + } + policyoutput = append(policyoutput, defaultstruct) + } + var output []interface{} + for _, ele := range policyoutput { + output = append(output, interface{}(ele)) + } + out := formats.StdoutTemplateArray{Output: output, Template: "{{.Repo}}\t{{.Trusttype}}\t{{.GPGid}}\t{{.Sigstore}}"} + return formats.Writer(out).Out() +} + +func setTrustCmd(c *cli.Context) error { + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "could not create runtime") + } + + args := c.Args() + if len(args) != 1 { + return errors.Errorf("default or a registry name must be specified") + } + valid, err := image.IsValidImageURI(args[0]) + if err != nil || !valid { + return errors.Wrapf(err, "invalid image uri %s", args[0]) + } + + trusttype := c.String("type") + if !isValidTrustType(trusttype) { + return errors.Errorf("invalid choice: %s (choose from 'accept', 'reject', 'signedBy')", trusttype) + } + if trusttype == "accept" { + trusttype = "insecureAcceptAnything" + } + + pubkeysfile := c.StringSlice("pubkeysfile") + if len(pubkeysfile) == 0 && trusttype == "signedBy" { + return errors.Errorf("At least one public key must be defined for type 'signedBy'") + } + + var policyPath string + if c.IsSet("policypath") { + policyPath = c.String("policypath") + } else { + policyPath = trust.DefaultPolicyPath(runtime.SystemContext()) + } + var policyContentStruct trust.PolicyContent + _, err = os.Stat(policyPath) + if !os.IsNotExist(err) { + policyContent, err := ioutil.ReadFile(policyPath) + if err != nil { + return errors.Wrapf(err, "unable to read %s", policyPath) + } + if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { + return errors.Errorf("could not read trust policies") + } + } + var newReposContent []trust.RepoContent + if len(pubkeysfile) != 0 { + for _, filepath := range pubkeysfile { + newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype, KeyType: "GPGKeys", KeyPath: filepath}) + } + } else { + newReposContent = append(newReposContent, trust.RepoContent{Type: trusttype}) + } + if args[0] == "default" { + policyContentStruct.Default = newReposContent + } else { + exists := false + for transport, transportval := range policyContentStruct.Transports { + _, exists = transportval[args[0]] + if exists { + policyContentStruct.Transports[transport][args[0]] = newReposContent + break + } + } + if !exists { + if policyContentStruct.Transports == nil { + policyContentStruct.Transports = make(map[string]trust.RepoMap) + } + if policyContentStruct.Transports["docker"] == nil { + policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent) + } + policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...) 
+ } + } + + data, err := json.MarshalIndent(policyContentStruct, "", " ") + if err != nil { + return errors.Wrapf(err, "error setting trust policy") + } + err = ioutil.WriteFile(policyPath, data, 0644) + if err != nil { + return errors.Wrapf(err, "error setting trust policy") + } + return nil +} + +var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"} + +func trustTypeDescription(trustType string) string { + trustDescription, exist := typeDescription[trustType] + if !exist { + logrus.Warnf("invalid trust type %s", trustType) + } + return trustDescription +} + +func sortPolicyJSONKey(m map[string]map[string]interface{}) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +func isValidTrustType(t string) bool { + if t == "accept" || t == "insecureAcceptAnything" || t == "reject" || t == "signedBy" { + return true + } + return false +} + +func getDefaultPolicyPath() string { + return trust.DefaultPolicyPath(&types.SystemContext{}) +} diff --git a/cmd/podman/unpause.go b/cmd/podman/unpause.go index 648fc9d3d..d77e056f8 100644 --- a/cmd/podman/unpause.go +++ b/cmd/podman/unpause.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os" "github.com/containers/libpod/cmd/podman/libpodruntime" @@ -37,7 +36,6 @@ var ( func unpauseCmd(c *cli.Context) error { var ( - lastError error unpauseContainers []*libpod.Container unpauseFuncs []shared.ParallelWorkerInput ) @@ -90,18 +88,6 @@ func unpauseCmd(c *cli.Context) error { } logrus.Debugf("Setting maximum workers to %d", maxWorkers) - unpauseErrors := shared.ParallelExecuteWorkerPool(maxWorkers, unpauseFuncs) - - for cid, result := range unpauseErrors { - if result != nil && result != libpod.ErrCtrStopped { - if len(unpauseErrors) > 1 { - fmt.Println(result.Error()) - } - lastError = result - continue - } - fmt.Println(cid) - } - - return lastError + unpauseErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, unpauseFuncs) + return printParallelOutput(unpauseErrors, errCount) } diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go index afeccb668..a59535b43 100644 --- a/cmd/podman/utils.go +++ b/cmd/podman/utils.go @@ -3,6 +3,9 @@ package main import ( "context" "fmt" + "os" + gosignal "os/signal" + "github.com/containers/libpod/libpod" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" @@ -11,8 +14,6 @@ import ( "github.com/urfave/cli" "golang.org/x/crypto/ssh/terminal" "k8s.io/client-go/tools/remotecommand" - "os" - gosignal "os/signal" ) type RawTtyFormatter struct { @@ -207,3 +208,49 @@ func getPodsFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Pod, error } return pods, lastError } + +func getVolumesFromContext(c *cli.Context, r *libpod.Runtime) ([]*libpod.Volume, error) { + args := c.Args() + var ( + vols []*libpod.Volume + lastError error + err error + ) + + if c.Bool("all") { + vols, err = r.Volumes() + if err != nil { + return nil, errors.Wrapf(err, "unable to get all volumes") + } + } + + for _, i := range args { + vol, err := r.GetVolume(i) + if err != nil { + if lastError != nil { + logrus.Errorf("%q", lastError) + } + lastError = errors.Wrapf(err, "unable to find volume %s", i) + continue + } + vols = append(vols, vol) + } + return vols, lastError +} + +//printParallelOutput takes the map of parallel worker results and outputs them +// to stdout +func printParallelOutput(m map[string]error, errCount int) error { + var lastError error + for cid, result 
:= range m { + if result != nil { + if errCount > 1 { + fmt.Println(result.Error()) + } + lastError = result + continue + } + fmt.Println(cid) + } + return lastError +} diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink index 4a4a1854c..376bbc950 100644 --- a/cmd/podman/varlink/io.podman.varlink +++ b/cmd/podman/varlink/io.podman.varlink @@ -343,15 +343,17 @@ type ListPodContainerInfo ( ) # PodCreate is an input structure for creating pods. -# It emulates options to podman pod create, however -# changing pause image name and pause container -# is not currently supported +# It emulates options to podman pod create. The infraCommand and +# infraImage options are currently NotSupported. type PodCreate ( name: string, cgroupParent: string, labels: [string]string, share: []string, - infra: bool + infra: bool, + infraCommand: string, + infraImage: string, + publish: []string ) # ListPodData is the returned struct for an individual pod @@ -371,6 +373,22 @@ type PodContainerErrorData ( reason: string ) +# Runlabel describes the required input for container runlabel +type Runlabel( + image: string, + authfile: string, + certDir: string, + creds: string, + display: bool, + name: string, + pull: bool, + signaturePolicyPath: string, + tlsVerify: bool, + label: string, + extraArgs: []string, + opts: [string]string +) + # Ping provides a response for developers to ensure their varlink setup is working. # #### Example # ~~~ @@ -449,6 +467,13 @@ method ListContainerChanges(name: string) -> (container: ContainerChanges) # path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound) # error will be returned. # The return value is the written tarfile. +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{"name": "flamboyant_payne", "path": "/tmp/payne.tar" }' +# { +# "tarfile": "/tmp/payne.tar" +# } +# ~~~ method ExportContainer(name: string, path: string) -> (tarfile: string) # GetContainerStats takes the name or ID of a container and returns a single ContainerStats structure which @@ -565,6 +590,18 @@ method RemoveContainer(name: string, force: bool) -> (container: string) # DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted # container IDs. See also [RemoveContainer](RemoveContainer). +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers +# { +# "containers": [ +# "451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee", +# "8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0", +# "cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084", +# "db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1" +# ] +# } +# ~~~ method DeleteStoppedContainers() -> (containers: []string) # ListImages returns an array of ImageInList structures which provide basic information about @@ -594,9 +631,10 @@ method InspectImage(name: string) -> (image: string) method HistoryImage(name: string) -> (history: []ImageHistory) # PushImage takes three input arguments: the name or ID of an image, the fully-qualified destination name of the image, -# and a boolean as to whether tls-verify should be used. It will return an [ImageNotFound](#ImageNotFound) error if +# and a boolean as to whether tls-verify should be used (with false disabling TLS, not affecting the default behavior). 
+# It will return an [ImageNotFound](#ImageNotFound) error if # the image cannot be found in local storage; otherwise the ID of the image will be returned on success. -method PushImage(name: string, tag: string, tlsverify: bool) -> (image: string) +method PushImage(name: string, tag: string, tlsverify: bool, signaturePolicy: string, creds: string, certDir: string, compress: bool, format: string, removeSignatures: bool, signBy: string) -> (image: string) # TagImage takes the name or ID of an image in local storage as well as the desired tag name. If the image cannot # be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise, the ID of the image is returned on success. @@ -621,6 +659,18 @@ method SearchImage(name: string, limit: int) -> (images: []ImageSearch) # DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned # in a string array. +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages +# { +# "images": [ +# "166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52", +# "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e", +# "3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc", +# "59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690" +# ] +# } +# ~~~ method DeleteUnusedImages() -> (images: []string) # Commit, creates an image from an existing container. It requires the name or @@ -652,7 +702,7 @@ method ExportImage(name: string, destination: string, compress: bool, tags: []st # "id": "426866d6fa419873f97e5cbd320eeb22778244c1dfffa01c944db3114f55772e" # } # ~~~ -method PullImage(name: string) -> (id: string) +method PullImage(name: string, certDir: string, creds: string, signaturePolicy: string, tlsVerify: bool) -> (id: string) # CreatePod creates a new empty pod. It uses a [PodCreate](#PodCreate) type for input. # On success, the ID of the newly created pod will be returned. @@ -672,11 +722,80 @@ method CreatePod(create: PodCreate) -> (pod: string) # ListPods returns a list of pods in no particular order. They are # returned as an array of ListPodData structs. See also [GetPod](#GetPod). +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods +# { +# "pods": [ +# { +# "cgroup": "machine.slice", +# "containersinfo": [ +# { +# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45", +# "name": "1840835294cf-infra", +# "status": "running" +# }, +# { +# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6", +# "name": "upbeat_murdock", +# "status": "running" +# } +# ], +# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST", +# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f", +# "name": "foobar", +# "numberofcontainers": "2", +# "status": "Running" +# }, +# { +# "cgroup": "machine.slice", +# "containersinfo": [ +# { +# "id": "1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e", +# "name": "784306f655c6-infra", +# "status": "running" +# } +# ], +# "createdat": "2018-12-07 13:09:57.105112457 -0600 CST", +# "id": "784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2", +# "name": "nostalgic_pike", +# "numberofcontainers": "1", +# "status": "Running" +# } +# ] +# } +# ~~~ method ListPods() -> (pods: []ListPodData) # GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData) # structure. A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found. 
# See also [ListPods](ListPods). +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{"name": "foobar"}' +# { +# "pod": { +# "cgroup": "machine.slice", +# "containersinfo": [ +# { +# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45", +# "name": "1840835294cf-infra", +# "status": "running" +# }, +# { +# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6", +# "name": "upbeat_murdock", +# "status": "running" +# } +# ], +# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST", +# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f", +# "name": "foobar", +# "numberofcontainers": "2", +# "status": "Running" +# } +# } +# ~~~ method GetPod(name: string) -> (pod: ListPodData) # InspectPod takes the name or ID of an image and returns a string respresentation of data associated with the @@ -698,7 +817,7 @@ method InspectPod(name: string) -> (pod: string) # ~~~ method StartPod(name: string) -> (pod: string) -# StopPod stops containers in a pod. It takes the name or ID of a pod. +# StopPod stops containers in a pod. It takes the name or ID of a pod and a timeout. # If the pod cannot be found, a [PodNotFound](#PodNotFound) error will be returned instead. # Containers in a pod are stopped independently. If there is an error stopping one container, the ID of those containers # will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError). @@ -711,7 +830,7 @@ method StartPod(name: string) -> (pod: string) # "pod": "135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6" # } # ~~~ -method StopPod(name: string) -> (pod: string) +method StopPod(name: string, timeout: int) -> (pod: string) # RestartPod will restart containers in a pod given a pod name or ID. Containers in # the pod that are running will be stopped, then all stopped containers will be run. @@ -734,6 +853,13 @@ method RestartPod(name: string) -> (pod: string) # will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError). # If the pod was killed with no errors, the pod ID is returned. # See also [StopPod](StopPod). +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{"name": "foobar", "signal": 15}' +# { +# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f" +# } +# ~~~ method KillPod(name: string, signal: int) -> (pod: string) # PausePod takes the name or ID of a pod and pauses the running containers associated with it. If the pod cannot be found, @@ -742,6 +868,13 @@ method KillPod(name: string, signal: int) -> (pod: string) # will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError). # If the pod was paused with no errors, the pod ID is returned. # See also [UnpausePod](#UnpausePod). +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "foobar"}' +# { +# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f" +# } +# ~~~ method PausePod(name: string) -> (pod: string) # UnpausePod takes the name or ID of a pod and unpauses the paused containers associated with it. If the pod cannot be @@ -750,6 +883,13 @@ method PausePod(name: string) -> (pod: string) # will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError). # If the pod was unpaused with no errors, the pod ID is returned. # See also [PausePod](#PausePod). 
+# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foobar"}' +# { +# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f" +# } +# ~~~ method UnpausePod(name: string) -> (pod: string) # RemovePod takes the name or ID of a pod as well a boolean representing whether a running @@ -804,6 +944,78 @@ method TopPod() -> (notimplemented: NotImplemented) # ~~~ method GetPodStats(name: string) -> (pod: string, containers: []ContainerStats) +# ImageExists talks a full or partial image ID or name and returns an int as to whether +# the image exists in local storage. An int result of 0 means the image does exist in +# local storage; whereas 1 indicates the image does not exists in local storage. +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "imageddoesntexist"}' +# { +# "exists": 1 +# } +# ~~~ +method ImageExists(name: string) -> (exists: int) + +# ContainerExists takes a full or partial container ID or name and returns an int as to +# whether the container exists in local storage. A result of 0 means the container does +# exists; whereas a result of 1 means it could not be found. +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{"name": "flamboyant_payne"}'{ +# "exists": 0 +# } +# ~~~ +method ContainerExists(name: string) -> (exists: int) + +# ContainerCheckPoint performs a checkpopint on a container by its name or full/partial container +# ID. On successful checkpoint, the id of the checkpointed container is returned. +method ContainerCheckpoint(name: string, keep: bool, leaveRunning: bool, tcpEstablished: bool) -> (id: string) + +# ContainerRestore restores a container that has been checkpointed. The container to be restored can +# be identified by its name or full/partial container ID. A successful restore will result in the return +# of the container's ID. +method ContainerRestore(name: string, keep: bool, tcpEstablished: bool) -> (id: string) + +# ContainerRunlabel runs executes a command as described by a given container image label. +method ContainerRunlabel(runlabel: Runlabel) -> () + +# ListContainerMounts gathers all the mounted container mount points and returns them as an array +# of strings +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerMounts +# { +# "mounts": [ +# "/var/lib/containers/storage/overlay/b215fb622c65ba3b06c6d2341be80b76a9de7ae415ce419e65228873d4f0dcc8/merged", +# "/var/lib/containers/storage/overlay/5eaf806073f79c0ed9a695180ad598e34f963f7407da1d2ccf3560bdab49b26f/merged", +# "/var/lib/containers/storage/overlay/1ecb6b1dbb251737c7a24a31869096839c3719d8b250bf075f75172ddcc701e1/merged", +# "/var/lib/containers/storage/overlay/7137b28a3c422165fe920cba851f2f8da271c6b5908672c451ebda03ad3919e2/merged" +# ] +# } +# ~~~ +method ListContainerMounts() -> (mounts: []string) + +# MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination +# mount is returned as a string. +# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{"name": "jolly_shannon"}'{ +# "path": "/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged" +# } +# ~~~ +method MountContainer(name: string) -> (path: string) + +# UnmountContainer umounts a container by its name or full/partial container ID. 
+# #### Example +# ~~~ +# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{"name": "jolly_shannon", "force": false}' +# {} +# ~~~ +method UnmountContainer(name: string, force: bool) -> () + +# This function is not implemented yet. +method ListContainerPorts(name: string) -> (notimplemented: NotImplemented) + # ImageNotFound means the image could not be found by the provided name or ID in local storage. error ImageNotFound (name: string) diff --git a/cmd/podman/version.go b/cmd/podman/version.go index d80f24a14..d81deb696 100644 --- a/cmd/podman/version.go +++ b/cmd/podman/version.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/containers/libpod/cmd/podman/formats" "github.com/containers/libpod/libpod" "github.com/pkg/errors" "github.com/urfave/cli" @@ -15,6 +16,19 @@ func versionCmd(c *cli.Context) error { if err != nil { errors.Wrapf(err, "unable to determine version") } + + versionOutputFormat := c.String("format") + if versionOutputFormat != "" { + var out formats.Writer + switch versionOutputFormat { + case formats.JSONString: + out = formats.JSONStruct{Output: output} + default: + out = formats.StdoutTemplate{Output: output, Template: versionOutputFormat} + } + formats.Writer(out).Out() + return nil + } fmt.Println("Version: ", output.Version) fmt.Println("Go Version: ", output.GoVersion) if output.GitCommit != "" { @@ -30,8 +44,17 @@ func versionCmd(c *cli.Context) error { } // Cli command to print out the full version of podman -var versionCommand = cli.Command{ - Name: "version", - Usage: "Display the PODMAN Version Information", - Action: versionCmd, -} +var ( + versionCommand = cli.Command{ + Name: "version", + Usage: "Display the Podman Version Information", + Action: versionCmd, + Flags: versionFlags, + } + versionFlags = []cli.Flag{ + cli.StringFlag{ + Name: "format", + Usage: "Change the output format to JSON or a Go template", + }, + } +) diff --git a/cmd/podman/volume.go b/cmd/podman/volume.go new file mode 100644 index 000000000..913592e74 --- /dev/null +++ b/cmd/podman/volume.go @@ -0,0 +1,26 @@ +package main + +import ( + "github.com/urfave/cli" +) + +var ( + volumeDescription = `Manage volumes. + +Volumes are created in and can be shared between containers.` + + volumeSubCommands = []cli.Command{ + volumeCreateCommand, + volumeLsCommand, + volumeRmCommand, + volumeInspectCommand, + volumePruneCommand, + } + volumeCommand = cli.Command{ + Name: "volume", + Usage: "Manage volumes", + Description: volumeDescription, + UseShortOptionHandling: true, + Subcommands: volumeSubCommands, + } +) diff --git a/cmd/podman/volume_create.go b/cmd/podman/volume_create.go new file mode 100644 index 000000000..0b5f8d1e3 --- /dev/null +++ b/cmd/podman/volume_create.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/libpod" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var volumeCreateDescription = ` +podman volume create + +Creates a new volume. 
If using the default driver, "local", the volume will +be created at.` + +var volumeCreateFlags = []cli.Flag{ + cli.StringFlag{ + Name: "driver", + Usage: "Specify volume driver name (default local)", + }, + cli.StringSliceFlag{ + Name: "label, l", + Usage: "Set metadata for a volume (default [])", + }, + cli.StringSliceFlag{ + Name: "opt, o", + Usage: "Set driver specific options (default [])", + }, +} + +var volumeCreateCommand = cli.Command{ + Name: "create", + Usage: "Create a new volume", + Description: volumeCreateDescription, + Flags: volumeCreateFlags, + Action: volumeCreateCmd, + SkipArgReorder: true, + ArgsUsage: "[VOLUME-NAME]", + UseShortOptionHandling: true, +} + +func volumeCreateCmd(c *cli.Context) error { + var ( + options []libpod.VolumeCreateOption + err error + volName string + ) + + if err = validateFlags(c, volumeCreateFlags); err != nil { + return err + } + + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "error creating libpod runtime") + } + defer runtime.Shutdown(false) + + if len(c.Args()) > 1 { + return errors.Errorf("too many arguments, create takes at most 1 argument") + } + + if len(c.Args()) > 0 { + volName = c.Args()[0] + options = append(options, libpod.WithVolumeName(volName)) + } + + if c.IsSet("driver") { + options = append(options, libpod.WithVolumeDriver(c.String("driver"))) + } + + labels, err := getAllLabels([]string{}, c.StringSlice("label")) + if err != nil { + return errors.Wrapf(err, "unable to process labels") + } + if len(labels) != 0 { + options = append(options, libpod.WithVolumeLabels(labels)) + } + + opts, err := getAllLabels([]string{}, c.StringSlice("opt")) + if err != nil { + return errors.Wrapf(err, "unable to process options") + } + if len(options) != 0 { + options = append(options, libpod.WithVolumeOptions(opts)) + } + + vol, err := runtime.NewVolume(getContext(), options...) + if err != nil { + return err + } + fmt.Printf("%s\n", vol.Name()) + + return nil +} diff --git a/cmd/podman/volume_inspect.go b/cmd/podman/volume_inspect.go new file mode 100644 index 000000000..152f1d098 --- /dev/null +++ b/cmd/podman/volume_inspect.go @@ -0,0 +1,63 @@ +package main + +import ( + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +var volumeInspectDescription = ` +podman volume inspect + +Display detailed information on one or more volumes. Can change the format +from JSON to a Go template. 
+` + +var volumeInspectFlags = []cli.Flag{ + cli.BoolFlag{ + Name: "all, a", + Usage: "Inspect all volumes", + }, + cli.StringFlag{ + Name: "format, f", + Usage: "Format volume output using Go template", + Value: "json", + }, +} + +var volumeInspectCommand = cli.Command{ + Name: "inspect", + Usage: "Display detailed information on one or more volumes", + Description: volumeInspectDescription, + Flags: volumeInspectFlags, + Action: volumeInspectCmd, + SkipArgReorder: true, + ArgsUsage: "[VOLUME-NAME ...]", + UseShortOptionHandling: true, +} + +func volumeInspectCmd(c *cli.Context) error { + var err error + + if err = validateFlags(c, volumeInspectFlags); err != nil { + return err + } + + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "error creating libpod runtime") + } + defer runtime.Shutdown(false) + + opts := volumeLsOptions{ + Format: c.String("format"), + } + + vols, lastError := getVolumesFromContext(c, runtime) + if lastError != nil { + logrus.Errorf("%q", lastError) + } + + return generateVolLsOutput(vols, opts, runtime) +} diff --git a/cmd/podman/volume_ls.go b/cmd/podman/volume_ls.go new file mode 100644 index 000000000..0f94549ee --- /dev/null +++ b/cmd/podman/volume_ls.go @@ -0,0 +1,308 @@ +package main + +import ( + "reflect" + "strings" + + "github.com/containers/libpod/cmd/podman/formats" + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/libpod" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +// volumeOptions is the "ls" command options +type volumeLsOptions struct { + Format string + Quiet bool +} + +// volumeLsTemplateParams is the template parameters to list the volumes +type volumeLsTemplateParams struct { + Name string + Labels string + MountPoint string + Driver string + Options string + Scope string +} + +// volumeLsJSONParams is the JSON parameters to list the volumes +type volumeLsJSONParams struct { + Name string `json:"name"` + Labels map[string]string `json:"labels"` + MountPoint string `json:"mountPoint"` + Driver string `json:"driver"` + Options map[string]string `json:"options"` + Scope string `json:"scope"` +} + +var volumeLsDescription = ` +podman volume ls + +List all available volumes. The output of the volumes can be filtered +and the output format can be changed to JSON or a user specified Go template. 
+` + +var volumeLsFlags = []cli.Flag{ + cli.StringFlag{ + Name: "filter, f", + Usage: "Filter volume output", + }, + cli.StringFlag{ + Name: "format", + Usage: "Format volume output using Go template", + Value: "table {{.Driver}}\t{{.Name}}", + }, + cli.BoolFlag{ + Name: "quiet, q", + Usage: "Print volume output in quiet mode", + }, +} + +var volumeLsCommand = cli.Command{ + Name: "ls", + Aliases: []string{"list"}, + Usage: "List volumes", + Description: volumeLsDescription, + Flags: volumeLsFlags, + Action: volumeLsCmd, + SkipArgReorder: true, + UseShortOptionHandling: true, +} + +func volumeLsCmd(c *cli.Context) error { + if err := validateFlags(c, volumeLsFlags); err != nil { + return err + } + + runtime, err := libpodruntime.GetRuntime(c) + if err != nil { + return errors.Wrapf(err, "error creating libpod runtime") + } + defer runtime.Shutdown(false) + + if len(c.Args()) > 0 { + return errors.Errorf("too many arguments, ls takes no arguments") + } + + opts := volumeLsOptions{ + Quiet: c.Bool("quiet"), + } + opts.Format = genVolLsFormat(c) + + // Get the filter functions based on any filters set + var filterFuncs []libpod.VolumeFilter + if c.String("filter") != "" { + filters := strings.Split(c.String("filter"), ",") + for _, f := range filters { + filterSplit := strings.Split(f, "=") + if len(filterSplit) < 2 { + return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + generatedFunc, err := generateVolumeFilterFuncs(filterSplit[0], filterSplit[1], runtime) + if err != nil { + return errors.Wrapf(err, "invalid filter") + } + filterFuncs = append(filterFuncs, generatedFunc) + } + } + + volumes, err := runtime.GetAllVolumes() + if err != nil { + return err + } + + // Get the volumes that match the filter + volsFiltered := make([]*libpod.Volume, 0, len(volumes)) + for _, vol := range volumes { + include := true + for _, filter := range filterFuncs { + include = include && filter(vol) + } + + if include { + volsFiltered = append(volsFiltered, vol) + } + } + return generateVolLsOutput(volsFiltered, opts, runtime) +} + +// generate the template based on conditions given +func genVolLsFormat(c *cli.Context) string { + var format string + if c.String("format") != "" { + // "\t" from the command line is not being recognized as a tab + // replacing the string "\t" to a tab character if the user passes in "\t" + format = strings.Replace(c.String("format"), `\t`, "\t", -1) + } + if c.Bool("quiet") { + format = "{{.Name}}" + } + return format +} + +// Convert output to genericParams for printing +func volLsToGeneric(templParams []volumeLsTemplateParams, JSONParams []volumeLsJSONParams) (genericParams []interface{}) { + if len(templParams) > 0 { + for _, v := range templParams { + genericParams = append(genericParams, interface{}(v)) + } + return + } + for _, v := range JSONParams { + genericParams = append(genericParams, interface{}(v)) + } + return +} + +// generate the accurate header based on template given +func (vol *volumeLsTemplateParams) volHeaderMap() map[string]string { + v := reflect.Indirect(reflect.ValueOf(vol)) + values := make(map[string]string) + + for i := 0; i < v.NumField(); i++ { + key := v.Type().Field(i).Name + value := key + if value == "Name" { + value = "Volume" + value + } + values[key] = strings.ToUpper(splitCamelCase(value)) + } + return values +} + +// getVolTemplateOutput returns all the volumes in the volumeLsTemplateParams format +func getVolTemplateOutput(lsParams []volumeLsJSONParams, opts volumeLsOptions) 
([]volumeLsTemplateParams, error) { + var lsOutput []volumeLsTemplateParams + + for _, lsParam := range lsParams { + var ( + labels string + options string + ) + + for k, v := range lsParam.Labels { + label := k + if v != "" { + label += "=" + v + } + labels += label + } + for k, v := range lsParam.Options { + option := k + if v != "" { + option += "=" + v + } + options += option + } + params := volumeLsTemplateParams{ + Name: lsParam.Name, + Driver: lsParam.Driver, + MountPoint: lsParam.MountPoint, + Scope: lsParam.Scope, + Labels: labels, + Options: options, + } + + lsOutput = append(lsOutput, params) + } + return lsOutput, nil +} + +// getVolJSONParams returns the volumes in JSON format +func getVolJSONParams(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) ([]volumeLsJSONParams, error) { + var lsOutput []volumeLsJSONParams + + for _, volume := range volumes { + params := volumeLsJSONParams{ + Name: volume.Name(), + Labels: volume.Labels(), + MountPoint: volume.MountPoint(), + Driver: volume.Driver(), + Options: volume.Options(), + Scope: volume.Scope(), + } + + lsOutput = append(lsOutput, params) + } + return lsOutput, nil +} + +// generateVolLsOutput generates the output based on the format, JSON or Go Template, and prints it out +func generateVolLsOutput(volumes []*libpod.Volume, opts volumeLsOptions, runtime *libpod.Runtime) error { + if len(volumes) == 0 && opts.Format != formats.JSONString { + return nil + } + lsOutput, err := getVolJSONParams(volumes, opts, runtime) + if err != nil { + return err + } + var out formats.Writer + + switch opts.Format { + case formats.JSONString: + if err != nil { + return errors.Wrapf(err, "unable to create JSON for volume output") + } + out = formats.JSONStructArray{Output: volLsToGeneric([]volumeLsTemplateParams{}, lsOutput)} + default: + lsOutput, err := getVolTemplateOutput(lsOutput, opts) + if err != nil { + return errors.Wrapf(err, "unable to create volume output") + } + out = formats.StdoutTemplateArray{Output: volLsToGeneric(lsOutput, []volumeLsJSONParams{}), Template: opts.Format, Fields: lsOutput[0].volHeaderMap()} + } + return formats.Writer(out).Out() +} + +// generateVolumeFilterFuncs returns the true if the volume matches the filter set, otherwise it returns false. 
+func generateVolumeFilterFuncs(filter, filterValue string, runtime *libpod.Runtime) (func(volume *libpod.Volume) bool, error) {
+	switch filter {
+	case "name":
+		return func(v *libpod.Volume) bool {
+			return strings.Contains(v.Name(), filterValue)
+		}, nil
+	case "driver":
+		return func(v *libpod.Volume) bool {
+			return v.Driver() == filterValue
+		}, nil
+	case "scope":
+		return func(v *libpod.Volume) bool {
+			return v.Scope() == filterValue
+		}, nil
+	case "label":
+		filterArray := strings.SplitN(filterValue, "=", 2)
+		filterKey := filterArray[0]
+		if len(filterArray) > 1 {
+			filterValue = filterArray[1]
+		} else {
+			filterValue = ""
+		}
+		return func(v *libpod.Volume) bool {
+			for labelKey, labelValue := range v.Labels() {
+				if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+					return true
+				}
+			}
+			return false
+		}, nil
+	case "opt":
+		filterArray := strings.SplitN(filterValue, "=", 2)
+		filterKey := filterArray[0]
+		if len(filterArray) > 1 {
+			filterValue = filterArray[1]
+		} else {
+			filterValue = ""
+		}
+		return func(v *libpod.Volume) bool {
+			for optKey, optValue := range v.Options() {
+				if optKey == filterKey && ("" == filterValue || optValue == filterValue) {
+					return true
+				}
+			}
+			return false
+		}, nil
+	}
+	return nil, errors.Errorf("%s is an invalid filter", filter)
+}
diff --git a/cmd/podman/volume_prune.go b/cmd/podman/volume_prune.go
new file mode 100644
index 000000000..652c50f42
--- /dev/null
+++ b/cmd/podman/volume_prune.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/libpod/cmd/podman/libpodruntime"
+	"github.com/containers/libpod/libpod"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli"
+)
+
+var volumePruneDescription = `
+podman volume prune
+
+Remove all unused volumes. Will prompt for confirmation if not
+using force.
+`
+
+var volumePruneFlags = []cli.Flag{
+	cli.BoolFlag{
+		Name:  "force, f",
+		Usage: "Do not prompt for confirmation",
+	},
+}
+
+var volumePruneCommand = cli.Command{
+	Name:                   "prune",
+	Usage:                  "Remove all unused volumes",
+	Description:            volumePruneDescription,
+	Flags:                  volumePruneFlags,
+	Action:                 volumePruneCmd,
+	SkipArgReorder:         true,
+	UseShortOptionHandling: true,
+}
+
+func volumePruneCmd(c *cli.Context) error {
+	var lastError error
+
+	if err := validateFlags(c, volumePruneFlags); err != nil {
+		return err
+	}
+
+	runtime, err := libpodruntime.GetRuntime(c)
+	if err != nil {
+		return errors.Wrapf(err, "error creating libpod runtime")
+	}
+	defer runtime.Shutdown(false)
+
+	ctx := getContext()
+
+	// Prompt for confirmation if --force is not set
+	if !c.Bool("force") {
+		reader := bufio.NewReader(os.Stdin)
+		fmt.Println("WARNING! This will remove all volumes not used by at least one container.")
+		fmt.Print("Are you sure you want to continue? [y/N] ")
+		ans, err := reader.ReadString('\n')
+		if err != nil {
+			return errors.Wrapf(err, "error reading input")
+		}
+		if strings.ToLower(ans)[0] != 'y' {
+			return nil
+		}
+	}
+
+	volumes, err := runtime.GetAllVolumes()
+	if err != nil {
+		return err
+	}
+
+	for _, vol := range volumes {
+		err = runtime.RemoveVolume(ctx, vol, false, true)
+		if err == nil {
+			fmt.Println(vol.Name())
+		} else if err != libpod.ErrVolumeBeingUsed {
+			if lastError != nil {
+				logrus.Errorf("%q", lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
+		}
+	}
+	return lastError
+}
diff --git a/cmd/podman/volume_rm.go b/cmd/podman/volume_rm.go
new file mode 100644
index 000000000..3fb623624
--- /dev/null
+++ b/cmd/podman/volume_rm.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/libpod/cmd/podman/libpodruntime"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli"
+)
+
+var volumeRmDescription = `
+podman volume rm
+
+Remove one or more existing volumes. Will only remove volumes that are
+not being used by any containers. To remove the volumes anyway, use the
+--force flag.
+`
+
+var volumeRmFlags = []cli.Flag{
+	cli.BoolFlag{
+		Name:  "all, a",
+		Usage: "Remove all volumes",
+	},
+	cli.BoolFlag{
+		Name:  "force, f",
+		Usage: "Remove a volume by force, even if it is being used by a container",
+	},
+}
+
+var volumeRmCommand = cli.Command{
+	Name:                   "rm",
+	Aliases:                []string{"remove"},
+	Usage:                  "Remove one or more volumes",
+	Description:            volumeRmDescription,
+	Flags:                  volumeRmFlags,
+	Action:                 volumeRmCmd,
+	ArgsUsage:              "[VOLUME-NAME ...]",
+	SkipArgReorder:         true,
+	UseShortOptionHandling: true,
+}
+
+func volumeRmCmd(c *cli.Context) error {
+	var err error
+
+	if err = validateFlags(c, volumeRmFlags); err != nil {
+		return err
+	}
+
+	runtime, err := libpodruntime.GetRuntime(c)
+	if err != nil {
+		return errors.Wrapf(err, "error creating libpod runtime")
+	}
+	defer runtime.Shutdown(false)
+
+	ctx := getContext()
+
+	vols, lastError := getVolumesFromContext(c, runtime)
+	for _, vol := range vols {
+		err = runtime.RemoveVolume(ctx, vol, c.Bool("force"), false)
+		if err != nil {
+			if lastError != nil {
+				logrus.Errorf("%q", lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to remove volume %q", vol.Name())
+		} else {
+			fmt.Println(vol.Name())
+		}
+	}
+	return lastError
+}
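For reference, the label and opt filters added in generateVolumeFilterFuncs share the same key/optional-value matching rule: a filter value of "key" matches any volume that carries that key, while "key=value" additionally requires the stored value to match. Below is a small standalone sketch of that rule in isolation; it is not part of the patch, and the helper name matchKeyValue and the sample data are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// matchKeyValue is a hypothetical helper mirroring the label/opt filter logic:
// "key" matches when the key is present, "key=value" additionally requires
// the stored value to equal "value".
func matchKeyValue(filterValue string, kv map[string]string) bool {
	parts := strings.SplitN(filterValue, "=", 2)
	key := parts[0]
	want := ""
	if len(parts) > 1 {
		want = parts[1]
	}
	for k, v := range kv {
		if k == key && (want == "" || v == want) {
			return true
		}
	}
	return false
}

func main() {
	labels := map[string]string{"env": "prod", "team": ""}
	fmt.Println(matchKeyValue("env", labels))      // true: key present
	fmt.Println(matchKeyValue("env=prod", labels)) // true: key and value match
	fmt.Println(matchKeyValue("env=dev", labels))  // false: value differs
	fmt.Println(matchKeyValue("owner", labels))    // false: key absent
}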