77 files changed, 4473 insertions, 615 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 5ec35cccb..c44277b05 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -137,7 +137,7 @@ gating_task: - 'cd $GOSRC && ./hack/podman-commands.sh |& ${TIMESTAMP}' # N/B: need 'clean' so some committed files are re-generated. - '/usr/local/bin/entrypoint.sh clean podman-remote |& ${TIMESTAMP}' - - '/usr/local/bin/entrypoint.sh clean podman BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}' + - '/usr/local/bin/entrypoint.sh clean podman xref_helpmsgs_manpages BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh local-cross |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh podman-remote-darwin |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh podman-remote-windows |& ${TIMESTAMP}' @@ -383,6 +383,10 @@ docdir: .PHONY: docs docs: .install.md2man docdir $(MANPAGES) ## Generate documentation +.PHONY: xref_helpmsgs_manpages +xref_helpmsgs_manpages: + ./hack/xref-helpmsgs-manpages + install-podman-remote-%-docs: podman-remote docs $(MANPAGES) rm -rf docs/build/remote mkdir -p docs/build/remote @@ -430,7 +434,7 @@ podman-remote-release-%.zip: cp release.txt "$(TMPDIR)/" cp ./bin/podman-remote-$*$(BINSFX) "$(TMPDIR)/$(SUBDIR)/podman$(BINSFX)" cp -r ./docs/build/remote/$* "$(TMPDIR)/$(SUBDIR)/docs/" - cd "$(TMPDIR)" && \ + cd "$(TMPDIR)/$(SUBDIR)" && \ zip --recurse-paths "$(CURDIR)/$@" "./release.txt" "./" -rm -rf "$(TMPDIR)" diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index c9c676ccb..6578f40fd 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -2,19 +2,26 @@ ## 1.8.2 +### Features +- Initial support for automatically updating containers managed via Systemd unit files has been merged. This allows containers to automatically upgrade if a newer version of their image becomes available + ### Bugfixes -- Fixed a bug where unit files generates by `podman generate systemd --new` would not force containers to detach, causing the unit to time out when trying to start +- Fixed a bug where unit files generated by `podman generate systemd --new` would not force containers to detach, causing the unit to time out when trying to start - Fixed a bug where `podman system reset` could delete important system directories if run as rootless on installations created by older Podman ([#4831](https://github.com/containers/libpod/issues/4831)) - Fixed a bug where images built by `podman build` would not properly set the OS and Architecture they were built with ([#5503](https://github.com/containers/libpod/issues/5503)) - Fixed a bug where attached `podman run` with `--sig-proxy` enabled (the default), when built with Go 1.14, would repeatedly send signal 23 to the process in the container and could generate errors when the container stopped ([#5483](https://github.com/containers/libpod/issues/5483)) - Fixed a bug where rootless `podman run` commands could hang when forwarding ports +- Fixed a bug where rootless Podman would not work when `/proc` was mounted with the `hidepid` option set +- Fixed a bug where the `podman system service` command would use large amounts of CPU when `--timeout` was set to 0 ([#5531](https://github.com/containers/libpod/issues/5531)) ### HTTP API - Initial support for Libpod endpoints related to creating and operating on image manifest lists has been added - The Libpod Healthcheck and Events API endpoints are now supported +- The Swagger endpoint can now handle cases where no Swagger documentation has been generated ### Misc -- Updated vendored containers/storage to 
v1.16.5 +- Updated Buildah to v1.14.3 +- Updated containers/storage to v1.16.5 - Several performance improvements have been made to creating containers, which should somewhat improve the performance of `podman create` and `podman run` ## 1.8.1 diff --git a/changelog.txt b/changelog.txt index e1f65fe9b..1e08a8419 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,21 @@ +- Changelog for v1.8.2 (2020-03-19) + * fix reported compat issues + * Don't include SUBDIR in windows.zip + * rootless: fix usage with hidepid=1 + * V2 podman command + * serve swagger when present + * swagger: more consistency fixes + * Vendor in containers/buildah v1.14.3 + * Reduce CPU usage when --timeout=0 + * New test: man page cross-ref against --help + * podman: avoid conmon zombie on exec + * Filter pods through pod list api + * Bump to v1.8.2-dev + * Fix vendoring on master + * fix timeout file flake + * auto updates + * pkg/systemd: add dbus support + - Changelog for v1.8.2-rc1 (2020-03-17) * Update release notes for v1.8.2-rc1 * Fix vendoring on master diff --git a/cmd/podman/pod_ps.go b/cmd/podman/pod_ps.go index d7731e983..7acbd6888 100644 --- a/cmd/podman/pod_ps.go +++ b/cmd/podman/pod_ps.go @@ -4,7 +4,6 @@ import ( "fmt" "reflect" "sort" - "strconv" "strings" "time" @@ -13,7 +12,6 @@ import ( "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" - "github.com/containers/libpod/pkg/util" "github.com/docker/go-units" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -29,8 +27,6 @@ const ( NUM_CTR_INFO = 10 ) -type PodFilter func(*adapter.Pod) bool - var ( bc_opts shared.PsOptions ) @@ -174,29 +170,23 @@ func podPsCmd(c *cliconfig.PodPsValues) error { opts.Format = genPodPsFormat(c) - var filterFuncs []PodFilter - if c.Filter != "" { - filters := strings.Split(c.Filter, ",") - for _, f := range filters { - filterSplit := strings.Split(f, "=") - if len(filterSplit) < 2 { - return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) - } - generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1]) - if err != nil { - return errors.Wrapf(err, "invalid filter") - } - filterFuncs = append(filterFuncs, generatedFunc) - } - } - var pods []*adapter.Pod + + // If latest is set true filters are ignored. 
if c.Latest { pod, err := runtime.GetLatestPod() if err != nil { return err } pods = append(pods, pod) + return generatePodPsOutput(pods, opts) + } + + if c.Filter != "" { + pods, err = runtime.GetPodsWithFilters(c.Filter) + if err != nil { + return err + } } else { pods, err = runtime.GetAllPods() if err != nil { @@ -204,19 +194,7 @@ func podPsCmd(c *cliconfig.PodPsValues) error { } } - podsFiltered := make([]*adapter.Pod, 0, len(pods)) - for _, pod := range pods { - include := true - for _, filter := range filterFuncs { - include = include && filter(pod) - } - - if include { - podsFiltered = append(podsFiltered, pod) - } - } - - return generatePodPsOutput(podsFiltered, opts) + return generatePodPsOutput(pods, opts) } // podPsCheckFlagsPassed checks if mutually exclusive flags are passed together @@ -235,88 +213,6 @@ func podPsCheckFlagsPassed(c *cliconfig.PodPsValues) error { return nil } -func generatePodFilterFuncs(filter, filterValue string) (func(pod *adapter.Pod) bool, error) { - switch filter { - case "ctr-ids": - return func(p *adapter.Pod) bool { - ctrIds, err := p.AllContainersByID() - if err != nil { - return false - } - return util.StringInSlice(filterValue, ctrIds) - }, nil - case "ctr-names": - return func(p *adapter.Pod) bool { - ctrs, err := p.AllContainers() - if err != nil { - return false - } - for _, ctr := range ctrs { - if filterValue == ctr.Name() { - return true - } - } - return false - }, nil - case "ctr-number": - return func(p *adapter.Pod) bool { - ctrIds, err := p.AllContainersByID() - if err != nil { - return false - } - - fVint, err2 := strconv.Atoi(filterValue) - if err2 != nil { - return false - } - return len(ctrIds) == fVint - }, nil - case "ctr-status": - if !util.StringInSlice(filterValue, []string{"created", "restarting", "running", "paused", "exited", "unknown"}) { - return nil, errors.Errorf("%s is not a valid status", filterValue) - } - return func(p *adapter.Pod) bool { - ctr_statuses, err := p.Status() - if err != nil { - return false - } - for _, ctr_status := range ctr_statuses { - state := ctr_status.String() - if ctr_status == define.ContainerStateConfigured { - state = "created" - } - if state == filterValue { - return true - } - } - return false - }, nil - case "id": - return func(p *adapter.Pod) bool { - return strings.Contains(p.ID(), filterValue) - }, nil - case "name": - return func(p *adapter.Pod) bool { - return strings.Contains(p.Name(), filterValue) - }, nil - case "status": - if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) { - return nil, errors.Errorf("%s is not a valid pod status", filterValue) - } - return func(p *adapter.Pod) bool { - status, err := p.GetPodStatus() - if err != nil { - return false - } - if strings.ToLower(status) == filterValue { - return true - } - return false - }, nil - } - return nil, errors.Errorf("%s is an invalid filter", filter) -} - // generate the template based on conditions given func genPodPsFormat(c *cliconfig.PodPsValues) string { format := "" diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go index 7b0b497fc..3046953b5 100644 --- a/cmd/podman/shared/pod.go +++ b/cmd/podman/shared/pod.go @@ -2,9 +2,11 @@ package shared import ( "strconv" + "strings" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/util" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/docker/go-connections/nat" "github.com/pkg/errors" @@ -134,4 +136,128 @@ func CreatePortBindings(ports 
[]string) ([]ocicni.PortMapping, error) { return portBindings, nil } +// GetPodsWithFilters uses the cliconfig to categorize if the latest pod is required. +func GetPodsWithFilters(r *libpod.Runtime, filters string) ([]*libpod.Pod, error) { + filterFuncs, err := GenerateFilterFunction(r, strings.Split(filters, ",")) + if err != nil { + return nil, err + } + return FilterAllPodsWithFilterFunc(r, filterFuncs...) +} + +// FilterAllPodsWithFilterFunc retrieves all pods +// Filters can be provided which will determine which pods are included in the +// output. Multiple filters are handled by ANDing their output, so only pods +// matching all filters are returned +func FilterAllPodsWithFilterFunc(r *libpod.Runtime, filters ...libpod.PodFilter) ([]*libpod.Pod, error) { + pods, err := r.Pods(filters...) + if err != nil { + return nil, err + } + return pods, nil +} + +// GenerateFilterFunction basically gets the filters based on the input by the user +// and filter the pod list based on the criteria. +func GenerateFilterFunction(r *libpod.Runtime, filters []string) ([]libpod.PodFilter, error) { + var filterFuncs []libpod.PodFilter + for _, f := range filters { + filterSplit := strings.Split(f, "=") + if len(filterSplit) < 2 { + return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1]) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter") + } + filterFuncs = append(filterFuncs, generatedFunc) + } + + return filterFuncs, nil +} +func generatePodFilterFuncs(filter, filterValue string) ( + func(pod *libpod.Pod) bool, error) { + switch filter { + case "ctr-ids": + return func(p *libpod.Pod) bool { + ctrIds, err := p.AllContainersByID() + if err != nil { + return false + } + return util.StringInSlice(filterValue, ctrIds) + }, nil + case "ctr-names": + return func(p *libpod.Pod) bool { + ctrs, err := p.AllContainers() + if err != nil { + return false + } + for _, ctr := range ctrs { + if filterValue == ctr.Name() { + return true + } + } + return false + }, nil + case "ctr-number": + return func(p *libpod.Pod) bool { + ctrIds, err := p.AllContainersByID() + if err != nil { + return false + } + + fVint, err2 := strconv.Atoi(filterValue) + if err2 != nil { + return false + } + return len(ctrIds) == fVint + }, nil + case "ctr-status": + if !util.StringInSlice(filterValue, + []string{"created", "restarting", "running", "paused", + "exited", "unknown"}) { + return nil, errors.Errorf("%s is not a valid status", filterValue) + } + return func(p *libpod.Pod) bool { + ctr_statuses, err := p.Status() + if err != nil { + return false + } + for _, ctr_status := range ctr_statuses { + state := ctr_status.String() + if ctr_status == define.ContainerStateConfigured { + state = "created" + } + if state == filterValue { + return true + } + } + return false + }, nil + case "id": + return func(p *libpod.Pod) bool { + return strings.Contains(p.ID(), filterValue) + }, nil + case "name": + return func(p *libpod.Pod) bool { + return strings.Contains(p.Name(), filterValue) + }, nil + case "status": + if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) { + return nil, errors.Errorf("%s is not a valid pod status", filterValue) + } + return func(p *libpod.Pod) bool { + status, err := p.GetPodStatus() + if err != nil { + return false + } + if strings.ToLower(status) == filterValue { + return true + } + return false + }, nil + } + return nil, 
errors.Errorf("%s is an invalid filter", filter) +} + var DefaultKernelNamespaces = "cgroup,ipc,net,uts" diff --git a/cmd/podmanV2/README.md b/cmd/podmanV2/README.md new file mode 100644 index 000000000..a17e6f850 --- /dev/null +++ b/cmd/podmanV2/README.md @@ -0,0 +1,113 @@ +# Adding a podman V2 commands + +## Build podman V2 + +```shell script +$ cd $GOPATH/src/github.com/containers/libpod/cmd/podmanV2 +``` +If you wish to include the libpod library in your program, +```shell script +$ go build -tags 'ABISupport' . +``` +The `--remote` flag may be used to connect to the Podman service using the API. +Otherwise, direct calls will be made to the Libpod library. +```shell script +$ go build -tags '!ABISupport' . +``` +The Libpod library is not linked into the executable. +All calls are made via the API and `--remote=False` is an error condition. + +## Adding a new command `podman manifests` +```shell script +$ mkdir -p $GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests +``` +Create the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests/manifest.go``` +```go +package manifests + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _manifests_ + manifestCmd = &cobra.Command{ + Use: "manifest", + Short: "Manage manifests", + Long: "Manage manifests", + Example: "podman manifests IMAGE", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, // Report error if there is no sub command given + } +) +func init() { + // Subscribe command to podman + registry.Commands = append(registry.Commands, registry.CliCommand{ + // _podman manifest_ will support both ABIMode and TunnelMode + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + // The definition for this command + Command: manifestCmd, + }) + // Setup cobra templates, sub commands will inherit + manifestCmd.SetHelpTemplate(registry.HelpTemplate()) + manifestCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +// preRunE populates the image engine for sub commands +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewImageEngine(cmd, args) + return err +} +``` +To "wire" in the `manifest` command, edit the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/main.go``` to add: +```go +package main + +import _ "github.com/containers/libpod/cmd/podmanV2/manifests" +``` + +## Adding a new sub command `podman manifests list` +Create the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests/inspect.go``` +```go +package manifests + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman manifests _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect IMAGE", + Short: "Display manifest from image", + Long: "Displays the low-level information on a manifest identified by image name or ID", + RunE: inspect, + Example: "podman manifest DEADBEEF", + } +) + +func init() { + // Subscribe inspect sub command to manifest command + registry.Commands = append(registry.Commands, registry.CliCommand{ + // _podman manifest inspect_ will support both ABIMode and TunnelMode + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + // The definition for this command + Command: inspectCmd, + Parent: manifestCmd, + }) + + // This is where you would configure the cobra flags using 
inspectCmd.Flags() +} + +// Business logic: cmd is inspectCmd, args is the positional arguments from os.Args +func inspect(cmd *cobra.Command, args []string) error { + // Business logic using registry.ImageEngine + // Do not pull from libpod directly use the domain objects and types + return nil +} +``` diff --git a/cmd/podmanV2/containers/container.go b/cmd/podmanV2/containers/container.go new file mode 100644 index 000000000..6b44f2a3e --- /dev/null +++ b/cmd/podmanV2/containers/container.go @@ -0,0 +1,33 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _container_ + containerCmd = &cobra.Command{ + Use: "container", + Short: "Manage containers", + Long: "Manage containers", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: containerCmd, + }) + containerCmd.SetHelpTemplate(registry.HelpTemplate()) + containerCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/containers/inspect.go b/cmd/podmanV2/containers/inspect.go new file mode 100644 index 000000000..635be4789 --- /dev/null +++ b/cmd/podmanV2/containers/inspect.go @@ -0,0 +1,42 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman container _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect [flags] CONTAINER", + Short: "Display the configuration of a container", + Long: `Displays the low-level information on a container identified by name or ID.`, + PreRunE: inspectPreRunE, + RunE: inspect, + Example: `podman container inspect myCtr + podman container inspect -l --format '{{.Id}} {{.Config.Labels}}'`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: inspectCmd, + Parent: containerCmd, + }) +} + +func inspectPreRunE(cmd *cobra.Command, args []string) (err error) { + err = preRunE(cmd, args) + if err != nil { + return + } + + _, err = registry.NewImageEngine(cmd, args) + return err +} + +func inspect(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/containers/list.go b/cmd/podmanV2/containers/list.go new file mode 100644 index 000000000..630d9bbc7 --- /dev/null +++ b/cmd/podmanV2/containers/list.go @@ -0,0 +1,34 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman container _list_ + listCmd = &cobra.Command{ + Use: "list", + Aliases: []string{"ls"}, + Args: cobra.NoArgs, + Short: "List containers", + Long: "Prints out information about the containers", + RunE: containers, + Example: `podman container list -a + podman container list -a --format "{{.ID}} {{.Image}} {{.Labels}} {{.Mounts}}" + podman container list --size --sort names`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, 
entities.TunnelMode}, + Command: listCmd, + Parent: containerCmd, + }) +} + +func containers(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/containers/ps.go b/cmd/podmanV2/containers/ps.go new file mode 100644 index 000000000..ce3d66c51 --- /dev/null +++ b/cmd/podmanV2/containers/ps.go @@ -0,0 +1,29 @@ +package containers + +import ( + "strings" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _ps_ + psCmd = &cobra.Command{ + Use: "ps", + Args: cobra.NoArgs, + Short: listCmd.Short, + Long: listCmd.Long, + PersistentPreRunE: preRunE, + RunE: containers, + Example: strings.Replace(listCmd.Example, "container list", "ps", -1), + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: psCmd, + }) +} diff --git a/cmd/podmanV2/images/history.go b/cmd/podmanV2/images/history.go new file mode 100644 index 000000000..c75ae6ddc --- /dev/null +++ b/cmd/podmanV2/images/history.go @@ -0,0 +1,79 @@ +package images + +import ( + "context" + "fmt" + "os" + "text/tabwriter" + "text/template" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/cmd/podmanV2/report" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + long = `Displays the history of an image. + + The information can be printed out in an easy to read, or user specified format, and can be truncated.` + + // podman _history_ + historyCmd = &cobra.Command{ + Use: "history [flags] IMAGE", + Short: "Show history of a specified image", + Long: long, + Example: "podman history quay.io/fedora/fedora", + Args: cobra.ExactArgs(1), + PersistentPreRunE: preRunE, + RunE: history, + } +) + +var cmdFlags = struct { + Human bool + NoTrunc bool + Quiet bool + Format string +}{} + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: historyCmd, + }) + + historyCmd.SetHelpTemplate(registry.HelpTemplate()) + historyCmd.SetUsageTemplate(registry.UsageTemplate()) + flags := historyCmd.Flags() + flags.StringVar(&cmdFlags.Format, "format", "", "Change the output to JSON or a Go template") + flags.BoolVarP(&cmdFlags.Human, "human", "H", true, "Display sizes and dates in human readable format") + flags.BoolVar(&cmdFlags.NoTrunc, "no-trunc", false, "Do not truncate the output") + flags.BoolVar(&cmdFlags.NoTrunc, "notruncate", false, "Do not truncate the output") + flags.BoolVarP(&cmdFlags.Quiet, "quiet", "q", false, "Display the numeric IDs only") +} + +func history(cmd *cobra.Command, args []string) error { + results, err := registry.ImageEngine().History(context.Background(), args[0], entities.ImageHistoryOptions{}) + if err != nil { + return err + } + + row := "{{slice $x.ID 0 12}}\t{{toRFC3339 $x.Created}}\t{{ellipsis $x.CreatedBy 45}}\t{{$x.Size}}\t{{$x.Comment}}\n" + if cmdFlags.Human { + row = "{{slice $x.ID 0 12}}\t{{toHumanDuration $x.Created}}\t{{ellipsis $x.CreatedBy 45}}\t{{toHumanSize $x.Size}}\t{{$x.Comment}}\n" + } + format := "{{range $y, $x := . 
}}" + row + "{{end}}" + + tmpl := template.Must(template.New("report").Funcs(report.PodmanTemplateFuncs()).Parse(format)) + w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0) + + _, _ = w.Write(report.ReportHeader("id", "created", "created by", "size", "comment")) + err = tmpl.Execute(w, results.Layers) + if err != nil { + fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Failed to print report")) + } + w.Flush() + return nil +} diff --git a/cmd/podmanV2/images/image.go b/cmd/podmanV2/images/image.go new file mode 100644 index 000000000..a15c3e826 --- /dev/null +++ b/cmd/podmanV2/images/image.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _image_ + imageCmd = &cobra.Command{ + Use: "image", + Short: "Manage images", + Long: "Manage images", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: imageCmd, + }) + imageCmd.SetHelpTemplate(registry.HelpTemplate()) + imageCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewImageEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/images/images.go b/cmd/podmanV2/images/images.go new file mode 100644 index 000000000..a1e56396a --- /dev/null +++ b/cmd/podmanV2/images/images.go @@ -0,0 +1,46 @@ +package images + +import ( + "strings" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _images_ + imagesCmd = &cobra.Command{ + Use: strings.Replace(listCmd.Use, "list", "images", 1), + Short: listCmd.Short, + Long: listCmd.Long, + PersistentPreRunE: preRunE, + RunE: images, + Example: strings.Replace(listCmd.Example, "podman image list", "podman images", -1), + } + + imagesOpts = entities.ImageListOptions{} +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: imagesCmd, + }) + imagesCmd.SetHelpTemplate(registry.HelpTemplate()) + imagesCmd.SetUsageTemplate(registry.UsageTemplate()) + + flags := imagesCmd.Flags() + flags.BoolVarP(&imagesOpts.All, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&imagesOpts.Digests, "digests", false, "Show digests") + flags.StringSliceVarP(&imagesOpts.Filter, "filter", "f", []string{}, "Filter output based on conditions provided (default [])") + flags.StringVar(&imagesOpts.Format, "format", "", "Change the output format to JSON or a Go template") + flags.BoolVarP(&imagesOpts.Noheading, "noheading", "n", false, "Do not print column headings") + // TODO Need to learn how to deal with second name being a string instead of a char. 
+ // This needs to be "no-trunc, notruncate" + flags.BoolVar(&imagesOpts.NoTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&imagesOpts.NoTrunc, "notruncate", false, "Do not truncate output") + flags.BoolVarP(&imagesOpts.Quiet, "quiet", "q", false, "Display only image IDs") + flags.StringVar(&imagesOpts.Sort, "sort", "created", "Sort by created, id, repository, size, or tag") + flags.BoolVarP(&imagesOpts.History, "history", "", false, "Display the image name history") +} diff --git a/cmd/podmanV2/images/inspect.go b/cmd/podmanV2/images/inspect.go new file mode 100644 index 000000000..9c44cea35 --- /dev/null +++ b/cmd/podmanV2/images/inspect.go @@ -0,0 +1,124 @@ +package images + +import ( + "strings" + + "github.com/containers/buildah/pkg/formats" + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/util" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + inspectOpts = entities.ImageInspectOptions{} + + // Command: podman image _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect [flags] IMAGE", + Short: "Display the configuration of an image", + Long: `Displays the low-level information on an image identified by name or ID.`, + PreRunE: populateEngines, + RunE: imageInspect, + Example: `podman image inspect alpine`, + } + + containerEngine entities.ContainerEngine +) + +// Inspect is unique in that it needs both an ImageEngine and a ContainerEngine +func populateEngines(cmd *cobra.Command, args []string) (err error) { + // Populate registry.ImageEngine + err = preRunE(cmd, args) + if err != nil { + return + } + + // Populate registry.ContainerEngine + containerEngine, err = registry.NewContainerEngine(cmd, args) + return +} + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: inspectCmd, + Parent: imageCmd, + }) + + flags := inspectCmd.Flags() + flags.BoolVarP(&inspectOpts.Latest, "latest", "l", false, "Act on the latest container podman is aware of") + flags.BoolVarP(&inspectOpts.Size, "size", "s", false, "Display total file size") + flags.StringVarP(&inspectOpts.Format, "format", "f", "", "Change the output format to a Go template") + + if registry.GlobalFlags.EngineMode == entities.ABIMode { + // TODO: This is the same as V1. We could skip creating the flag altogether in V2... + _ = flags.MarkHidden("latest") + } +} + +const ( + inspectTypeContainer = "container" + inspectTypeImage = "image" + inspectAll = "all" +) + +func imageInspect(cmd *cobra.Command, args []string) error { + inspectType := inspectTypeImage + latestContainer := inspectOpts.Latest + + if len(args) == 0 && !latestContainer { + return errors.Errorf("container or image name must be specified: podman inspect [options [...]] name") + } + + if len(args) > 0 && latestContainer { + return errors.Errorf("you cannot provide additional arguments with --latest") + } + + if !util.StringInSlice(inspectType, []string{inspectTypeContainer, inspectTypeImage, inspectAll}) { + return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll) + } + + outputFormat := inspectOpts.Format + if strings.Contains(outputFormat, "{{.Id}}") { + outputFormat = strings.Replace(outputFormat, "{{.Id}}", formats.IDString, -1) + } + // These fields were renamed, so we need to provide backward compat for + // the old names. 
+ if strings.Contains(outputFormat, ".Src") { + outputFormat = strings.Replace(outputFormat, ".Src", ".Source", -1) + } + if strings.Contains(outputFormat, ".Dst") { + outputFormat = strings.Replace(outputFormat, ".Dst", ".Destination", -1) + } + if strings.Contains(outputFormat, ".ImageID") { + outputFormat = strings.Replace(outputFormat, ".ImageID", ".Image", -1) + } + _ = outputFormat + // if latestContainer { + // lc, err := ctnrRuntime.GetLatestContainer() + // if err != nil { + // return err + // } + // args = append(args, lc.ID()) + // inspectType = inspectTypeContainer + // } + + // inspectedObjects, iterateErr := iterateInput(getContext(), c.Size, args, runtime, inspectType) + // if iterateErr != nil { + // return iterateErr + // } + // + // var out formats.Writer + // if outputFormat != "" && outputFormat != formats.JSONString { + // // template + // out = formats.StdoutTemplateArray{Output: inspectedObjects, Template: outputFormat} + // } else { + // // default is json output + // out = formats.JSONStructArray{Output: inspectedObjects} + // } + // + // return out.Out() + return nil +} diff --git a/cmd/podmanV2/images/list.go b/cmd/podmanV2/images/list.go new file mode 100644 index 000000000..cfdfaaed2 --- /dev/null +++ b/cmd/podmanV2/images/list.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman image _list_ + listCmd = &cobra.Command{ + Use: "list [flag] [IMAGE]", + Aliases: []string{"ls"}, + Short: "List images in local storage", + Long: "Lists images previously pulled to the system or created on the system.", + RunE: images, + Example: `podman image list --format json + podman image list --sort repository --format "table {{.ID}} {{.Repository}} {{.Tag}}" + podman image list --filter dangling=true`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: listCmd, + Parent: imageCmd, + }) +} + +func images(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/main.go b/cmd/podmanV2/main.go new file mode 100644 index 000000000..0df086352 --- /dev/null +++ b/cmd/podmanV2/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "os" + "reflect" + "runtime" + + _ "github.com/containers/libpod/cmd/podmanV2/containers" + _ "github.com/containers/libpod/cmd/podmanV2/images" + _ "github.com/containers/libpod/cmd/podmanV2/networks" + _ "github.com/containers/libpod/cmd/podmanV2/pods" + "github.com/containers/libpod/cmd/podmanV2/registry" + _ "github.com/containers/libpod/cmd/podmanV2/volumes" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func init() { + if err := libpod.SetXdgDirs(); err != nil { + logrus.Errorf(err.Error()) + os.Exit(1) + } + initCobra() +} + +func initCobra() { + switch runtime.GOOS { + case "darwin": + fallthrough + case "windows": + registry.GlobalFlags.EngineMode = entities.TunnelMode + case "linux": + registry.GlobalFlags.EngineMode = entities.ABIMode + default: + logrus.Errorf("%s is not a supported OS", runtime.GOOS) + os.Exit(1) + } + + // TODO: Is there a Cobra way to "peek" at os.Args? 
+ if ok := Contains("--remote", os.Args); ok { + registry.GlobalFlags.EngineMode = entities.TunnelMode + } + + cobra.OnInitialize(func() {}) +} + +func main() { + fmt.Fprintf(os.Stderr, "Number of commands: %d\n", len(registry.Commands)) + for _, c := range registry.Commands { + if Contains(registry.GlobalFlags.EngineMode, c.Mode) { + parent := rootCmd + if c.Parent != nil { + parent = c.Parent + } + parent.AddCommand(c.Command) + } + } + + Execute() + os.Exit(0) +} + +func Contains(item interface{}, slice interface{}) bool { + s := reflect.ValueOf(slice) + + switch s.Kind() { + case reflect.Array: + fallthrough + case reflect.Slice: + break + default: + return false + } + + for i := 0; i < s.Len(); i++ { + if s.Index(i).Interface() == item { + return true + } + } + return false +} diff --git a/cmd/podmanV2/networks/network.go b/cmd/podmanV2/networks/network.go new file mode 100644 index 000000000..fc92d2321 --- /dev/null +++ b/cmd/podmanV2/networks/network.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _network_ + cmd = &cobra.Command{ + Use: "network", + Short: "Manage networks", + Long: "Manage networks", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/pods/pod.go b/cmd/podmanV2/pods/pod.go new file mode 100644 index 000000000..81c0d33e1 --- /dev/null +++ b/cmd/podmanV2/pods/pod.go @@ -0,0 +1,33 @@ +package pods + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _pod_ + podCmd = &cobra.Command{ + Use: "pod", + Short: "Manage pods", + Long: "Manage pods", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: podCmd, + }) + podCmd.SetHelpTemplate(registry.HelpTemplate()) + podCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/pods/ps.go b/cmd/podmanV2/pods/ps.go new file mode 100644 index 000000000..d4c625b2e --- /dev/null +++ b/cmd/podmanV2/pods/ps.go @@ -0,0 +1,32 @@ +package pods + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + psDescription = "List all pods on system including their names, ids and current state." 
+ + // Command: podman pod _ps_ + psCmd = &cobra.Command{ + Use: "ps", + Aliases: []string{"ls", "list"}, + Short: "list pods", + Long: psDescription, + RunE: pods, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: psCmd, + Parent: podCmd, + }) +} + +func pods(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/registry/registry.go b/cmd/podmanV2/registry/registry.go new file mode 100644 index 000000000..fa51d6535 --- /dev/null +++ b/cmd/podmanV2/registry/registry.go @@ -0,0 +1,96 @@ +package registry + +import ( + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type CliCommand struct { + Mode []entities.EngineMode + Command *cobra.Command + Parent *cobra.Command +} + +var ( + Commands []CliCommand + GlobalFlags entities.EngineFlags + imageEngine entities.ImageEngine + containerEngine entities.ContainerEngine + PodmanTunnel bool +) + +// HelpTemplate returns the help template for podman commands +// This uses the short and long options. +// command should not use this. +func HelpTemplate() string { + return `{{.Short}} + +Description: + {{.Long}} + +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// UsageTemplate returns the usage template for podman commands +// This blocks the displaying of the global options. The main podman +// command should not use this. +func UsageTemplate() string { + return `Usage(v2):{{if (and .Runnable (not .HasAvailableSubCommands))}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: + {{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} +{{end}} +` +} + +func ImageEngine() entities.ImageEngine { + return imageEngine +} + +// NewImageEngine is a wrapper for building an ImageEngine to be used for PreRunE functions +func NewImageEngine(cmd *cobra.Command, args []string) (entities.ImageEngine, error) { + if imageEngine == nil { + engine, err := infra.NewImageEngine(GlobalFlags.EngineMode, entities.EngineOptions{}) + if err != nil { + return nil, err + } + imageEngine = engine + } + return imageEngine, nil +} + +func ContainerEngine() entities.ContainerEngine { + return containerEngine +} + +// NewContainerEngine is a wrapper for building an ContainerEngine to be used for PreRunE functions +func NewContainerEngine(cmd *cobra.Command, args []string) (entities.ContainerEngine, error) { + if containerEngine == nil { + engine, err := infra.NewContainerEngine(GlobalFlags.EngineMode, entities.EngineOptions{}) + if err != nil { + return nil, err + } + containerEngine = engine + } + return containerEngine, nil +} + +func SubCommandExists(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + return errors.Errorf("unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information.", cmd.CommandPath(), args[0]) + } + return errors.Errorf("missing command '%[1]s COMMAND'\nTry '%[1]s --help' for more information.", cmd.CommandPath()) +} diff --git 
a/cmd/podmanV2/report/templates.go b/cmd/podmanV2/report/templates.go new file mode 100644 index 000000000..dc43d4f9b --- /dev/null +++ b/cmd/podmanV2/report/templates.go @@ -0,0 +1,61 @@ +package report + +import ( + "strings" + "text/template" + "time" + + "github.com/docker/go-units" +) + +var defaultFuncMap = template.FuncMap{ + "ellipsis": func(s string, length int) string { + if len(s) > length { + return s[:length-3] + "..." + } + return s + }, + // TODO: Remove on Go 1.14 port + "slice": func(s string, i, j int) string { + if i > j || len(s) < i { + return s + } + if len(s) < j { + return s[i:] + } + return s[i:j] + }, + "toRFC3339": func(t int64) string { + return time.Unix(t, 0).Format(time.RFC3339) + }, + "toHumanDuration": func(t int64) string { + return units.HumanDuration(time.Since(time.Unix(t, 0))) + " ago" + }, + "toHumanSize": func(sz int64) string { + return units.HumanSize(float64(sz)) + }, +} + +func ReportHeader(columns ...string) []byte { + hdr := make([]string, len(columns)) + for i, h := range columns { + hdr[i] = strings.ToUpper(h) + } + return []byte(strings.Join(hdr, "\t") + "\n") +} + +func AppendFuncMap(funcMap template.FuncMap) template.FuncMap { + merged := PodmanTemplateFuncs() + for k, v := range funcMap { + merged[k] = v + } + return merged +} + +func PodmanTemplateFuncs() template.FuncMap { + merged := make(template.FuncMap) + for k, v := range defaultFuncMap { + merged[k] = v + } + return merged +} diff --git a/cmd/podmanV2/root.go b/cmd/podmanV2/root.go new file mode 100644 index 000000000..778184f28 --- /dev/null +++ b/cmd/podmanV2/root.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + "path" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/version" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: path.Base(os.Args[0]), + Long: "Manage pods, containers and images", + SilenceUsage: true, + SilenceErrors: true, + TraverseChildren: true, + RunE: registry.SubCommandExists, + Version: version.Version, +} + +func init() { + // Override default --help information of `--version` global flag} + var dummyVersion bool + rootCmd.PersistentFlags().BoolVarP(&dummyVersion, "version", "v", false, "Version of podman") + rootCmd.PersistentFlags().BoolVarP(®istry.PodmanTunnel, "remote", "r", false, "Access service via SSH tunnel") +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/cmd/podmanV2/system/system.go b/cmd/podmanV2/system/system.go new file mode 100644 index 000000000..30ed328e8 --- /dev/null +++ b/cmd/podmanV2/system/system.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _system_ + cmd = &cobra.Command{ + Use: "system", + Short: "Manage podman", + Long: "Manage podman", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/volumes/volume.go b/cmd/podmanV2/volumes/volume.go new file mode 
100644 index 000000000..245c06da0 --- /dev/null +++ b/cmd/podmanV2/volumes/volume.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _volume_ + cmd = &cobra.Command{ + Use: "volume", + Short: "Manage volumes", + Long: "Volumes are created in and can be shared between containers", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index 0222be7ba..635de0c7e 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -48,7 +48,7 @@ Epoch: 99 %else Epoch: 0 %endif -Version: 1.8.2 +Version: 1.8.3 Release: #COMMITDATE#.git%{shortcommit0}%{?dist} Summary: Manage Pods, Containers and Container Images License: ASL 2.0 diff --git a/hack/man-page-checker b/hack/man-page-checker index 528ff800a..17d85d65d 100755 --- a/hack/man-page-checker +++ b/hack/man-page-checker @@ -1,16 +1,6 @@ #!/bin/bash # -# man-page-name-checker - validate and cross-reference man page names -# -# FIXME as of 2019-03-20 there are still four files with inconsistent names: -# -# podman-logs.1.md NAME= podman-container-logs -# podman-info.1.md NAME= podman-system-info -# podman-rm.1.md NAME= podman-container-rm -# podman-rmi.1.md NAME= podman-image-rm -# -# If those four get renamed (with suitable symlink fixes), this script -# can be enabled in CI to prevent future inconsistencies. +# man-page-checker - validate and cross-reference man page names # die() { diff --git a/hack/xref-helpmsgs-manpages b/hack/xref-helpmsgs-manpages new file mode 100755 index 000000000..00db3c8de --- /dev/null +++ b/hack/xref-helpmsgs-manpages @@ -0,0 +1,307 @@ +#!/usr/bin/perl +# +# xref-helpmsgs-manpages - cross-reference --help options against man pages +# +package LibPod::CI::XrefHelpmsgsManpages; + +use v5.14; +use utf8; + +use strict; +use warnings; + +(our $ME = $0) =~ s|.*/||; +our $VERSION = '0.1'; + +# For debugging, show data structures using DumpTree($var) +#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0; + +############################################################################### +# BEGIN user-customizable section + +# Path to podman executable +my $Default_Podman = './bin/podman'; +my $PODMAN = $ENV{PODMAN} || $Default_Podman; + +# Path to podman markdown source files (of the form podman-*.1.md) +my $Markdown_Path = 'docs/source/markdown'; + +# END user-customizable section +############################################################################### + +use FindBin; + +############################################################################### +# BEGIN boilerplate args checking, usage messages + +sub usage { + print <<"END_USAGE"; +Usage: $ME [OPTIONS] + +$ME recursively runs 'podman --help' against +all subcommands; and recursively reads podman-*.1.md files +in $Markdown_Path, then cross-references that each --help +option is listed in the appropriate man page and vice-versa. 
+ +$ME invokes '\$PODMAN' (default: $Default_Podman). + +Exit status is zero if no inconsistencies found, one otherwise + +OPTIONS: + + -v, --verbose show verbose progress indicators + -n, --dry-run make no actual changes + + --help display this message + --version display program name and version +END_USAGE + + exit; +} + +# Command-line options. Note that this operates directly on @ARGV ! +our $debug = 0; +our $verbose = 0; +sub handle_opts { + use Getopt::Long; + GetOptions( + 'debug!' => \$debug, + 'verbose|v' => \$verbose, + + help => \&usage, + version => sub { print "$ME version $VERSION\n"; exit 0 }, + ) or die "Try `$ME --help' for help\n"; +} + +# END boilerplate args checking, usage messages +############################################################################### + +############################## CODE BEGINS HERE ############################### + +# The term is "modulino". +__PACKAGE__->main() unless caller(); + +# Main code. +sub main { + # Note that we operate directly on @ARGV, not on function parameters. + # This is deliberate: it's because Getopt::Long only operates on @ARGV + # and there's no clean way to make it use @_. + handle_opts(); # will set package globals + + # Fetch command-line arguments. Barf if too many. + die "$ME: Too many arguments; try $ME --help\n" if @ARGV; + + my $help = podman_help(); + my $man = podman_man('podman'); + + my $retval = xref_by_help($help, $man) + + xref_by_man($help, $man); + + exit !!$retval; +} + +################## +# xref_by_help # Find keys in '--help' but not in man +################## +sub xref_by_help { + my ($help, $man, @subcommand) = @_; + my $errs = 0; + + for my $k (sort keys %$help) { + if (exists $man->{$k}) { + if (ref $help->{$k}) { + $errs += xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k); + } + # Otherwise, non-ref is leaf node such as a --option + } + else { + my $man = $man->{_path} || 'man'; + warn "$ME: podman @subcommand --help lists $k, but $k not in $man\n"; + ++$errs; + } + } + + return $errs; +} + +################# +# xref_by_man # Find keys in man pages but not in --help +################# +# +# In an ideal world we could share the functionality in one function; but +# there are just too many special cases in man pages. +# +sub xref_by_man { + my ($help, $man, @subcommand) = @_; + + my $errs = 0; + + # FIXME: this generates way too much output + for my $k (grep { $_ ne '_path' } sort keys %$man) { + if (exists $help->{$k}) { + if (ref $man->{$k}) { + $errs += xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k); + } + } + elsif ($k ne '--help' && $k ne '-h') { + my $man = $man->{_path} || 'man'; + + # Special case: podman-inspect serves dual purpose (image, ctr) + my %ignore = map { $_ => 1 } qw(-l -s -t --latest --size --type); + next if $man =~ /-inspect/ && $ignore{$k}; + + # Special case: the 'trust' man page is a mess + next if $man =~ /-trust/; + + # Special case: '--net' is an undocumented shortcut + next if $k eq '--net' && $help->{'--network'}; + + # Special case: these are actually global options + next if $k =~ /^--(cni-config-dir|runtime)$/ && $man =~ /-build/; + + # Special case: weirdness with Cobra and global/local options + next if $k eq '--namespace' && $man =~ /-ps/; + + # Special case: these require compiling with 'varlink' tag, + # which doesn't happen in CI gating task. 
+ next if $k eq 'varlink'; + next if "@subcommand" eq 'system' && $k eq 'service'; + + warn "$ME: podman @subcommand: $k in $man, but not --help\n"; + ++$errs; + } + } + + return $errs; +} + + +################# +# podman_help # Parse output of 'podman [subcommand] --help' +################# +sub podman_help { + my %help; + open my $fh, '-|', $PODMAN, @_, '--help' + or die "$ME: Cannot fork: $!\n"; + my $section = ''; + while (my $line = <$fh>) { + # Cobra is blessedly consistent in its output: + # Usage: ... + # Available Commands: + # .... + # Flags: + # .... + # + # Start by identifying the section we're in... + if ($line =~ /^Available\s+(Commands):/) { + $section = lc $1; + } + elsif ($line =~ /^(Flags):/) { + $section = lc $1; + } + + # ...then track commands and options. For subcommands, recurse. + elsif ($section eq 'commands') { + if ($line =~ /^\s{1,4}(\S+)\s/) { + my $subcommand = $1; + print "> podman @_ $subcommand\n" if $debug; + $help{$subcommand} = podman_help(@_, $subcommand) + unless $subcommand eq 'help'; # 'help' not in man + } + } + elsif ($section eq 'flags') { + # Handle '--foo' or '-f, --foo' + if ($line =~ /^\s{1,10}(--\S+)\s/) { + print "> podman @_ $1\n" if $debug; + $help{$1} = 1; + } + elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) { + print "> podman @_ $1, $2\n" if $debug; + $help{$1} = $help{$2} = 1; + } + } + } + close $fh + or die "$ME: Error running 'podman @_ --help'\n"; + + return \%help; +} + + +################ +# podman_man # Parse contents of podman-*.1.md +################ +sub podman_man { + my $command = shift; + my $subpath = "$Markdown_Path/$command.1.md"; + my $manpath = "$FindBin::Bin/../$subpath"; + print "** $subpath \n" if $debug; + + my %man = (_path => $subpath); + open my $fh, '<', $manpath + or die "$ME: Cannot read $manpath: $!\n"; + my $section = ''; + my @most_recent_flags; + while (my $line = <$fh>) { + chomp $line; + next unless $line; # skip empty lines + + # .md files designate sections with leading double hash + if ($line =~ /^##\s*(GLOBAL\s+)?OPTIONS/) { + $section = 'flags'; + } + elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) { + $section = 'commands'; + } + elsif ($line =~ /^\#\#/) { + $section = ''; + } + + # This will be a table containing subcommand names, links to man pages. + # The format is slightly different between podman.1.md and subcommands. + elsif ($section eq 'commands') { + # In podman.1.md + if ($line =~ /^\|\s*\[podman-(\S+?)\(\d\)\]/) { + $man{$1} = podman_man("podman-$1"); + } + + # In podman-<subcommand>.1.md + elsif ($line =~ /^\|\s+(\S+)\s+\|\s+\[\S+\]\((\S+)\.1\.md\)/) { + $man{$1} = podman_man($2); + } + } + + # Flags should always be of the form '**-f**' or '**--flag**', + # possibly separated by comma-space. + elsif ($section eq 'flags') { + # e.g. 'podman run --ip6', documented in man page, but nonexistent + if ($line =~ /^not\s+implemented/i) { + delete $man{$_} for @most_recent_flags; + } + + @most_recent_flags = (); + # Handle any variation of '**--foo**, **-f**' + while ($line =~ s/^\*\*((--[a-z0-9-]+)|(-.))\*\*(,\s+)?//g) { + $man{$1} = 1; + + # Keep track of them, in case we see 'Not implemented' below + push @most_recent_flags, $1; + } + } + } + close $fh; + + # Special case: the 'image trust' man page tries hard to cover both set + # and show, which means it ends up not being machine-readable. 
+ if ($command eq 'podman-image-trust') { + my %set = %man; + my %show = %man; + $show{$_} = 1 for qw(--raw -j --json); + return +{ set => \%set, show => \%show } + } + + return \%man; +} + + +1; diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 4918bf57a..2ce8e0de4 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -41,6 +41,8 @@ type BoltState struct { // containing the path to the container's network namespace, a dependencies // bucket containing the container's dependencies, and an optional pod key // containing the ID of the pod the container is joined to. +// After updates to include exec sessions, may also include an exec bucket +// with the IDs of exec sessions currently in use by the container. // - allCtrsBkt: Map of ID to name containing only containers. Used for // container lookup operations. // - podBkt: Contains a sub-bucket for each pod in the state. @@ -49,6 +51,10 @@ type BoltState struct { // containers in the pod. // - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup // operations. +// - execBkt: Map of exec session ID to exec session - contains a sub-bucket for +// each exec session in the DB. +// - execRegistryBkt: Map of exec session ID to nothing. Contains one entry for +// each exec session. Used for iterating through all exec sessions. // - runtimeConfigBkt: Contains configuration of the libpod instance that // initially created the database. This must match for any further instances // that access the database, to ensure that state mismatches with @@ -86,6 +92,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { allPodsBkt, volBkt, allVolsBkt, + execBkt, runtimeConfigBkt, } @@ -171,6 +178,11 @@ func (s *BoltState) Refresh() error { return err } + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + // Iterate through all IDs. Check if they are containers. // If they are, unmarshal their state, and then clear // PID, mountpoint, and state for all of them @@ -245,6 +257,26 @@ func (s *BoltState) Refresh() error { return errors.Wrapf(err, "error updating state for container %s in DB", string(id)) } + // Delete all exec sessions, if there are any + ctrExecBkt := ctrBkt.Bucket(execBkt) + if ctrExecBkt != nil { + // Can't delete in a ForEach, so build a list of + // what to remove then remove. + toRemove := []string{} + err = ctrExecBkt.ForEach(func(id, unused []byte) error { + toRemove = append(toRemove, string(id)) + return nil + }) + if err != nil { + return err + } + for _, execId := range toRemove { + if err := ctrExecBkt.Delete([]byte(execId)); err != nil { + return errors.Wrapf(err, "error removing exec session %s from container %s", execId, string(id)) + } + } + } + return nil }) if err != nil { @@ -285,7 +317,30 @@ func (s *BoltState) Refresh() error { return nil }) - return err + if err != nil { + return err + } + + // Now refresh exec sessions + // We want to remove them all, but for-each can't modify buckets + // So we have to make a list of what to operate on, then do the + // work. 
+ toRemoveExec := []string{} + err = execBucket.ForEach(func(id, unused []byte) error { + toRemoveExec = append(toRemoveExec, string(id)) + return nil + }) + if err != nil { + return err + } + + for _, execSession := range toRemoveExec { + if err := execBucket.Delete([]byte(execSession)); err != nil { + return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession) + } + } + + return nil }) return err } @@ -895,6 +950,287 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) { return config, nil } +// AddExecSession adds an exec session to the state. +func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessionID := []byte(session.ID()) + + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID()) + } + + ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt) + if err != nil { + return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID()) + } + + execExists := execBucket.Get(sessionID) + if execExists != nil { + return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID()) + } + + if err := execBucket.Put(sessionID, ctrID); err != nil { + return errors.Wrapf(err, "error adding exec session %s to DB", session.ID()) + } + + if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil { + return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID()) + } + + return nil + }) + return err +} + +// GetExecSession returns the ID of the container an exec session is associated +// with. +func (s *BoltState) GetExecSession(id string) (string, error) { + if !s.valid { + return "", define.ErrDBClosed + } + + if id == "" { + return "", define.ErrEmptyID + } + + db, err := s.getDBCon() + if err != nil { + return "", err + } + defer s.deferredCloseDBCon(db) + + ctrID := "" + err = db.View(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + + ctr := execBucket.Get([]byte(id)) + if ctr == nil { + return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id) + } + ctrID = string(ctr) + return nil + }) + return ctrID, err +} + +// RemoveExecSession removes references to the given exec session in the +// database. 
+func (s *BoltState) RemoveExecSession(session *ExecSession) error { + if !s.valid { + return define.ErrDBClosed + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + sessionID := []byte(session.ID()) + containerID := []byte(session.ContainerID()) + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + sessionExists := execBucket.Get(sessionID) + if sessionExists == nil { + return define.ErrNoSuchExecSession + } + // Check that container ID matches + if string(sessionExists) != session.ContainerID() { + return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists)) + } + + if err := execBucket.Delete(sessionID); err != nil { + return errors.Wrapf(err, "error removing exec session %s from database", session.ID()) + } + + dbCtr := ctrBucket.Bucket(containerID) + if dbCtr == nil { + // State is inconsistent. We refer to a container that + // is no longer in the state. + // Return without error, to attempt to recover. + return nil + } + + ctrExecBucket := dbCtr.Bucket(execBkt) + if ctrExecBucket == nil { + // Again, state is inconsistent. We should have an exec + // bucket, and it should have this session. + // Again, nothing we can do, so proceed and try to + // recover. + return nil + } + + ctrSessionExists := ctrExecBucket.Get(sessionID) + if ctrSessionExists != nil { + if err := ctrExecBucket.Delete(sessionID); err != nil { + return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID()) + } + } + + return nil + }) + return err +} + +// GetContainerExecSessions retrieves the IDs of all exec sessions running in a +// container that the database is aware of (IE, were added via AddExecSession). +func (s *BoltState) GetContainerExecSessions(ctr *Container) ([]string, error) { + if !s.valid { + return nil, define.ErrDBClosed + } + + if !ctr.valid { + return nil, define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return nil, err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessions := []string{} + err = db.View(func(tx *bolt.Tx) error { + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrExecSessions := dbCtr.Bucket(execBkt) + if ctrExecSessions == nil { + return nil + } + + return ctrExecSessions.ForEach(func(id, unused []byte) error { + sessions = append(sessions, string(id)) + return nil + }) + }) + if err != nil { + return nil, err + } + + return sessions, nil +} + +// RemoveContainerExecSessions removes all exec sessions attached to a given +// container. 
+func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessions := []string{} + + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrExecSessions := dbCtr.Bucket(execBkt) + if ctrExecSessions == nil { + return nil + } + + err = ctrExecSessions.ForEach(func(id, unused []byte) error { + sessions = append(sessions, string(id)) + return nil + }) + if err != nil { + return err + } + + for _, session := range sessions { + if err := ctrExecSessions.Delete([]byte(session)); err != nil { + return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session) + } + // Check if the session exists in the global table + // before removing. It should, but in cases where the DB + // has become inconsistent, we should try and proceed + // so we can recover. + sessionExists := execBucket.Get([]byte(session)) + if sessionExists == nil { + continue + } + if string(sessionExists) != ctr.ID() { + return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists)) + } + if err := execBucket.Delete([]byte(session)); err != nil { + return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session) + } + } + + return nil + }) + return err +} + // RewriteContainerConfig rewrites a container's configuration. // WARNING: This function is DANGEROUS. Do not use without reading the full // comment on this function in state.go. diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 3f09305f5..1c4c9e12d 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -24,6 +24,7 @@ const ( allPodsName = "allPods" volName = "vol" allVolsName = "allVolumes" + execName = "exec" runtimeConfigName = "runtime-config" configName = "config" @@ -54,6 +55,7 @@ var ( allPodsBkt = []byte(allPodsName) volBkt = []byte(volName) allVolsBkt = []byte(allVolsName) + execBkt = []byte(execName) runtimeConfigBkt = []byte(runtimeConfigName) configKey = []byte(configName) @@ -339,6 +341,14 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) { return bkt, nil } +func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) { + bkt := tx.Bucket(execBkt) + if bkt == nil { + return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB") + } + return bkt, nil +} + func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) { bkt := tx.Bucket(runtimeConfigBkt) if bkt == nil { @@ -787,6 +797,23 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } } + // Does the container have exec sessions? 
+ ctrExecSessionsBkt := ctrExists.Bucket(execBkt) + if ctrExecSessionsBkt != nil { + sessions := []string{} + err = ctrExecSessionsBkt.ForEach(func(id, value []byte) error { + sessions = append(sessions, string(id)) + + return nil + }) + if err != nil { + return err + } + if len(sessions) > 0 { + return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", ")) + } + } + // Does the container have dependencies? ctrDepsBkt := ctrExists.Bucket(dependenciesBkt) if ctrDepsBkt == nil { diff --git a/libpod/common_test.go b/libpod/common_test.go index 63ea4f41b..747252cf4 100644 --- a/libpod/common_test.go +++ b/libpod/common_test.go @@ -58,14 +58,12 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error) PID: 1234, ExecSessions: map[string]*ExecSession{ "abcd": { - ID: "1", - Command: []string{"2", "3"}, - PID: 9876, + Id: "1", + PID: 9876, }, "ef01": { - ID: "5", - Command: []string{"hello", "world"}, - PID: 46765, + Id: "5", + PID: 46765, }, }, BindMounts: map[string]string{ diff --git a/libpod/container.go b/libpod/container.go index d83de93bb..e59fb9fe8 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -181,9 +181,13 @@ type ContainerState struct { PID int `json:"pid,omitempty"` // ConmonPID is the PID of the container's conmon ConmonPID int `json:"conmonPid,omitempty"` - // ExecSessions contains active exec sessions for container - // Exec session ID is mapped to PID of exec process - ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"` + // ExecSessions contains all exec sessions that are associated with this + // container. + ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"` + // LegacyExecSessions are legacy exec sessions from older versions of + // Podman. + // These are DEPRECATED and will be removed in a future release. + LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"` // NetworkStatus contains the configuration results for all networks // the pod is attached to. Only populated if we created a network // namespace for the container, and the network namespace is currently @@ -214,13 +218,6 @@ type ContainerState struct { containerPlatformState } -// ExecSession contains information on an active exec session -type ExecSession struct { - ID string `json:"id"` - Command []string `json:"command"` - PID int `json:"pid"` -} - // ContainerConfig contains all information that was used to create the // container. It may not be changed once created. 
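Note how ContainerState above moves new-style sessions to the newExecSessions JSON key while the deprecated LegacyExecSessions field keeps the old execSessions key, so state written by older Podman still decodes. A stand-alone sketch of that tag-migration trick, with trimmed stand-in types rather than the real libpod structs:

package main

import (
	"encoding/json"
	"fmt"
)

type ExecSession struct {
	Id  string `json:"id"`
	PID int    `json:"pid,omitempty"`
}

type legacyExecSession struct {
	ID      string   `json:"id"`
	Command []string `json:"command"`
	PID     int      `json:"pid"`
}

type ContainerState struct {
	// New sessions serialize under a fresh key...
	ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"`
	// ...while the old key keeps decoding into the legacy field.
	LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"`
}

func main() {
	old := []byte(`{"execSessions":{"abcd":{"id":"1","command":["ps"],"pid":9876}}}`)
	var st ContainerState
	if err := json.Unmarshal(old, &st); err != nil {
		panic(err)
	}
	fmt.Printf("new: %d legacy: %d\n", len(st.ExecSessions), len(st.LegacyExecSessions)) // new: 0 legacy: 1
}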
// It is stored, read-only, on disk @@ -944,13 +941,13 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) { session, ok := c.state.ExecSessions[id] if !ok { - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID()) + return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID()) } returnSession := new(ExecSession) - returnSession.ID = session.ID - returnSession.Command = session.Command - returnSession.PID = session.PID + if err := JSONDeepCopy(session, returnSession); err != nil { + return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID()) + } return returnSession, nil } diff --git a/libpod/container_api.go b/libpod/container_api.go index 039619ea6..967180437 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -9,10 +9,8 @@ import ( "os" "time" - "github.com/containers/common/pkg/capabilities" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" - "github.com/containers/storage/pkg/stringid" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -215,142 +213,6 @@ func (c *Container) Kill(signal uint) error { return c.save() } -// Exec starts a new process inside the container -// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of define.ExecErrorCodeCannotInvoke is returned. -// If another generic error happens, an exit code of define.ExecErrorCodeGeneric is returned. -// Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit code is the exit code of the call. -// Otherwise, the exit code will be the exit code of the executed call inside of the container. -// TODO investigate allowing exec without attaching -func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs uint, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) { - var capList []string - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - - if err := c.syncContainer(); err != nil { - return define.ExecErrorCodeCannotInvoke, err - } - } - - if c.state.State != define.ContainerStateRunning { - return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running") - } - - if privileged || c.config.Privileged { - capList = capabilities.AllCapabilities() - } - - // Generate exec session ID - // Ensure we don't conflict with an existing session ID - sessionID := stringid.GenerateNonCryptoID() - found := true - // This really ought to be a do-while, but Go doesn't have those... 
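The ExecSession accessor above now builds its return value with JSONDeepCopy instead of copying fields by hand, which stays correct as the struct grows. A minimal stand-in for such a JSON round-trip copy helper (the real libpod helper may differ):

package main

import (
	"encoding/json"
	"fmt"
)

// jsonDeepCopy copies src into dst by round-tripping through JSON.
// Only exported, JSON-serializable fields survive the copy.
func jsonDeepCopy(src, dst interface{}) error {
	buf, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(buf, dst)
}

type execSession struct {
	Id       string `json:"id"`
	PID      int    `json:"pid,omitempty"`
	ExitCode int    `json:"exitCode,omitempty"`
}

func main() {
	orig := &execSession{Id: "abc", PID: 42}
	copySess := new(execSession)
	if err := jsonDeepCopy(orig, copySess); err != nil {
		panic(err)
	}
	copySess.PID = 0 // mutating the copy leaves the original untouched
	fmt.Println(orig.PID, copySess.PID) // 42 0
}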
- for found { - found = false - for id := range c.state.ExecSessions { - if id == sessionID { - found = true - break - } - } - if found { - sessionID = stringid.GenerateNonCryptoID() - } - } - - logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID) - if err := c.createExecBundle(sessionID); err != nil { - return define.ExecErrorCodeCannotInvoke, err - } - - defer func() { - // cleanup exec bundle - if err := c.cleanupExecBundle(sessionID); err != nil { - logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err) - } - }() - - opts := new(ExecOptions) - opts.Cmd = cmd - opts.CapAdd = capList - opts.Env = env - opts.Terminal = tty - opts.Cwd = workDir - opts.User = user - opts.Streams = streams - opts.PreserveFDs = preserveFDs - opts.Resize = resize - opts.DetachKeys = detachKeys - - pid, attachChan, err := c.ociRuntime.ExecContainer(c, sessionID, opts) - if err != nil { - ec := define.ExecErrorCodeGeneric - // Conmon will pass a non-zero exit code from the runtime as a pid here. - // we differentiate a pid with an exit code by sending it as negative, so reverse - // that change and return the exit code the runtime failed with. - if pid < 0 { - ec = -1 * pid - } - return ec, err - } - - // We have the PID, add it to state - if c.state.ExecSessions == nil { - c.state.ExecSessions = make(map[string]*ExecSession) - } - session := new(ExecSession) - session.ID = sessionID - session.Command = cmd - session.PID = pid - c.state.ExecSessions[sessionID] = session - if err := c.save(); err != nil { - // Now we have a PID but we can't save it in the DB - // TODO handle this better - return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID()) - } - c.newContainerEvent(events.Exec) - logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID()) - - // Unlock so other processes can use the container - if !c.batched { - c.lock.Unlock() - } - - lastErr := <-attachChan - - exitCode, err := c.readExecExitCode(sessionID) - if err != nil { - if lastErr != nil { - logrus.Errorf(lastErr.Error()) - } - lastErr = err - } - if exitCode != 0 { - if lastErr != nil { - logrus.Errorf(lastErr.Error()) - } - lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode) - } - - // Lock again - if !c.batched { - c.lock.Lock() - } - - // Sync the container again to pick up changes in state - if err := c.syncContainer(); err != nil { - logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID) - return exitCode, lastErr - } - - // Remove the exec session from state - delete(c.state.ExecSessions, sessionID) - if err := c.save(); err != nil { - logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err) - } - return exitCode, lastErr -} - // AttachStreams contains streams that will be attached to the container type AttachStreams struct { // OutputStream will be attached to container's STDOUT @@ -493,7 +355,11 @@ func (c *Container) Unmount(force bool) error { if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID()) } - if len(c.state.ExecSessions) != 0 { + execSessions, err := c.getActiveExecSessions() + if err != nil { + return err + } + if len(execSessions) != 0 { return errors.Wrapf(define.ErrCtrStateInvalid, 
"container %s has active exec sessions, refusing to unmount", c.ID()) } return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID()) @@ -674,15 +540,15 @@ func (c *Container) Cleanup(ctx context.Context) error { // If we didn't restart, we perform a normal cleanup - // Reap exec sessions first. - if err := c.reapExecSessions(); err != nil { + // Check for running exec sessions + sessions, err := c.getActiveExecSessions() + if err != nil { return err } - - // Check if we have active exec sessions after reaping. - if len(c.state.ExecSessions) != 0 { + if len(sessions) > 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID()) } + defer c.newContainerEvent(events.Cleanup) return c.cleanup(ctx) } @@ -757,114 +623,11 @@ func (c *Container) Sync() error { return nil } -// Refresh refreshes a container's state in the database, restarting the -// container if it is running +// Refresh is DEPRECATED and REMOVED. func (c *Container) Refresh(ctx context.Context) error { - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - - if err := c.syncContainer(); err != nil { - return err - } - } - - if c.state.State == define.ContainerStateRemoving { - return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed") - } - - wasCreated := false - if c.state.State == define.ContainerStateCreated { - wasCreated = true - } - wasRunning := false - if c.state.State == define.ContainerStateRunning { - wasRunning = true - } - wasPaused := false - if c.state.State == define.ContainerStatePaused { - wasPaused = true - } - - // First, unpause the container if it's paused - if c.state.State == define.ContainerStatePaused { - if err := c.unpause(); err != nil { - return err - } - } - - // Next, if the container is running, stop it - if c.state.State == define.ContainerStateRunning { - if err := c.stop(c.config.StopTimeout); err != nil { - return err - } - } - - // If there are active exec sessions, we need to kill them - if len(c.state.ExecSessions) > 0 { - logrus.Infof("Killing %d exec sessions in container %s. 
They will not be restored after refresh.", - len(c.state.ExecSessions), c.ID()) - } - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } - } - - // If the container is in ContainerStateStopped, we need to delete it - // from the runtime and clear conmon state - if c.state.State == define.ContainerStateStopped { - if err := c.delete(ctx); err != nil { - return err - } - if err := c.removeConmonFiles(); err != nil { - return err - } - } - - // Fire cleanup code one more time unconditionally to ensure we are good - // to refresh - if err := c.cleanup(ctx); err != nil { - return err - } - - logrus.Debugf("Resetting state of container %s", c.ID()) - - // We've finished unwinding the container back to its initial state - // Now safe to refresh container state - if err := resetState(c.state); err != nil { - return errors.Wrapf(err, "error resetting state of container %s", c.ID()) - } - if err := c.refresh(); err != nil { - return err - } - - logrus.Debugf("Successfully refresh container %s state", c.ID()) - - // Initialize the container if it was created in runc - if wasCreated || wasRunning || wasPaused { - if err := c.prepare(); err != nil { - return err - } - if err := c.init(ctx, false); err != nil { - return err - } - } - - // If the container was running before, start it - if wasRunning || wasPaused { - if err := c.start(); err != nil { - return err - } - } - - // If the container was paused before, re-pause it - if wasPaused { - if err := c.pause(); err != nil { - return err - } - } - return nil + // This has been deprecated for a long while, and is in the process of + // being removed. + return define.ErrNotImplemented } // ContainerCheckpointOptions is a struct used to pass the parameters diff --git a/libpod/container_exec.go b/libpod/container_exec.go new file mode 100644 index 000000000..7ed7a3343 --- /dev/null +++ b/libpod/container_exec.go @@ -0,0 +1,857 @@ +package libpod + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/containers/common/pkg/capabilities" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/events" + "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/client-go/tools/remotecommand" +) + +// ExecConfig contains the configuration of an exec session +type ExecConfig struct { + // Command the the command that will be invoked in the exec session. + // Must not be empty. + Command []string `json:"command"` + // Terminal is whether the exec session will allocate a pseudoterminal. + Terminal bool `json:"terminal,omitempty"` + // AttachStdin is whether the STDIN stream will be forwarded to the exec + // session's first process when attaching. Only available if Terminal is + // false. + AttachStdin bool `json:"attachStdin,omitempty"` + // AttachStdout is whether the STDOUT stream will be forwarded to the + // exec session's first process when attaching. Only available if + // Terminal is false. + AttachStdout bool `json:"attachStdout,omitempty"` + // AttachStderr is whether the STDERR stream will be forwarded to the + // exec session's first process when attaching. Only available if + // Terminal is false. + AttachStderr bool `json:"attachStderr,omitempty"` + // DetachKeys are keys that will be used to detach from the exec + // session. 
Here, nil will use the default detach keys, where a pointer + // to the empty string ("") will disable detaching via detach keys. + DetachKeys *string `json:"detachKeys,omitempty"` + // Environment is a set of environment variables that will be set for + // the first process started by the exec session. + Environment map[string]string `json:"environment,omitempty"` + // Privileged is whether the exec session will be privileged - that is, + // will be granted additional capabilities. + Privileged bool `json:"privileged,omitempty"` + // User is the user the exec session will be run as. + // If set to "" the exec session will be started as the same user the + // container was started as. + User string `json:"user,omitempty"` + // WorkDir is the working directory for the first process that will be + // launched by the exec session. + // If set to "" the exec session will be started in / within the + // container. + WorkDir string `json:"workDir,omitempty"` + // PreserveFDs indicates that a number of extra FDs from the process + // running libpod will be passed into the container. These are assumed + // to begin at 3 (immediately after the standard streams). The number + // given is the number that will be passed into the exec session, + // starting at 3. + PreserveFDs uint `json:"preserveFds,omitempty"` +} + +// ExecSession contains information on a single exec session attached to a given +// container. +type ExecSession struct { + // Id is the ID of the exec session. + // Named somewhat strangely to not conflict with ID(). + Id string `json:"id"` + // ContainerId is the ID of the container this exec session belongs to. + // Named somewhat strangely to not conflict with ContainerID(). + ContainerId string `json:"containerId"` + + // State is the state of the exec session. + State define.ContainerExecStatus `json:"state"` + // PID is the PID of the process created by the exec session. + PID int `json:"pid,omitempty"` + // ExitCode is the exit code of the exec session, if it has exited. + ExitCode int `json:"exitCode,omitempty"` + + // Config is the configuration of this exec session. + // Cannot be empty. + Config *ExecConfig `json:"config"` +} + +// ID returns the ID of an exec session. +func (e *ExecSession) ID() string { + return e.Id +} + +// ContainerID returns the ID of the container this exec session was started in. +func (e *ExecSession) ContainerID() string { + return e.ContainerId +} + +// InspectExecSession contains information about a given exec session. +type InspectExecSession struct { + // CanRemove is legacy and used purely for compatibility reasons. + // Will always be set to true, unless the exec session is running. + CanRemove bool `json:"CanRemove"` + // ContainerID is the ID of the container this exec session is attached + // to. + ContainerID string `json:"ContainerID"` + // DetachKeys are the detach keys used by the exec session. + // If set to "" the default keys are being used. + // Will show "<none>" if no detach keys are set. + DetachKeys string `json:"DetachKeys"` + // ExitCode is the exit code of the exec session. Will be set to 0 if + // the exec session has not yet exited. + ExitCode int `json:"ExitCode"` + // ID is the ID of the exec session. + ID string `json:"ID"` + // OpenStderr is whether the container's STDERR stream will be attached. + // Always set to true if the exec session created a TTY. + OpenStderr bool `json:"OpenStderr"` + // OpenStdin is whether the container's STDIN stream will be attached + // to. 
+ OpenStdin bool `json:"OpenStdin"` + // OpenStdout is whether the container's STDOUT stream will be attached. + // Always set to true if the exec session created a TTY. + OpenStdout bool `json:"OpenStdout"` + // Running is whether the exec session is running. + Running bool `json:"Running"` + // Pid is the PID of the exec session's process. + // Will be set to 0 if the exec session is not running. + Pid int `json:"Pid"` + // ProcessConfig contains information about the exec session's process. + ProcessConfig *InspectExecProcess `json:"ProcessConfig"` +} + +// InspectExecProcess contains information about the process in a given exec +// session. +type InspectExecProcess struct { + // Arguments are the arguments to the entrypoint command of the exec + // session. + Arguments []string `json:"arguments"` + // Entrypoint is the entrypoint for the exec session (the command that + // will be executed in the container). + Entrypoint string `json:"entrypoint"` + // Privileged is whether the exec session will be started with elevated + // privileges. + Privileged bool `json:"privileged"` + // Tty is whether the exec session created a terminal. + Tty bool `json:"tty"` + // User is the user the exec session was started as. + User string `json:"user"` +} + +// Inspect inspects the given exec session and produces detailed output on its +// configuration and current state. +func (e *ExecSession) Inspect() (*InspectExecSession, error) { + if e.Config == nil { + return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block") + } + + output := new(InspectExecSession) + output.CanRemove = e.State != define.ExecStateRunning + output.ContainerID = e.ContainerId + if e.Config.DetachKeys != nil { + output.DetachKeys = *e.Config.DetachKeys + } + output.ExitCode = e.ExitCode + output.ID = e.Id + output.OpenStderr = e.Config.AttachStderr + output.OpenStdin = e.Config.AttachStdin + output.OpenStdout = e.Config.AttachStdout + output.Running = e.State == define.ExecStateRunning + output.Pid = e.PID + output.ProcessConfig = new(InspectExecProcess) + if len(e.Config.Command) > 0 { + output.ProcessConfig.Entrypoint = e.Config.Command[0] + if len(e.Config.Command) > 1 { + output.ProcessConfig.Arguments = make([]string, 0, len(e.Config.Command)-1) + output.ProcessConfig.Arguments = append(output.ProcessConfig.Arguments, e.Config.Command[1:]...) + } + } + output.ProcessConfig.Privileged = e.Config.Privileged + output.ProcessConfig.Tty = e.Config.Terminal + output.ProcessConfig.User = e.Config.User + + return output, nil +} + +// legacyExecSession contains information on an active exec session. It is a +// holdover from a previous Podman version and is DEPRECATED. +type legacyExecSession struct { + ID string `json:"id"` + Command []string `json:"command"` + PID int `json:"pid"` +} + +// ExecCreate creates a new exec session for the container. +// The session is not started. The ID of the new exec session will be returned. 
+func (c *Container) ExecCreate(config *ExecConfig) (string, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return "", err + } + } + + // Verify our config + if config == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate") + } + if len(config.Command) == 0 { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session") + } + if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) { + return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal") + } + + // Generate an ID for our new exec session + sessionID := stringid.GenerateNonCryptoID() + found := true + // This really ought to be a do-while, but Go doesn't have those... + for found { + found = false + for id := range c.state.ExecSessions { + if id == sessionID { + found = true + break + } + } + if found { + sessionID = stringid.GenerateNonCryptoID() + } + } + + // Make our new exec session + session := new(ExecSession) + session.Id = sessionID + session.ContainerId = c.ID() + session.State = define.ExecStateCreated + session.Config = new(ExecConfig) + if err := JSONDeepCopy(config, session.Config); err != nil { + return "", errors.Wrapf(err, "error copying exec configuration into exec session") + } + + if c.state.ExecSessions == nil { + c.state.ExecSessions = make(map[string]*ExecSession) + } + + // Need to add to container state and exec session registry + c.state.ExecSessions[session.ID()] = session + if err := c.save(); err != nil { + return "", err + } + if err := c.runtime.state.AddExecSession(c, session); err != nil { + return "", err + } + + logrus.Infof("Created exec session %s in container %s", session.ID(), c.ID()) + + return sessionID, nil +} + +// ExecStart starts an exec session in the container, but does not attach to it. +// Returns immediately upon starting the exec session. +func (c *Container) ExecStart(sessionID string) error { + // Will be implemented in part 2, migrating Start and implementing + // detached Start. + return define.ErrNotImplemented +} + +// ExecStartAndAttach starts and attaches to an exec session in a container. +// TODO: Should we include detach keys in the signature to allow override? +// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr? +func (c *Container) ExecStartAndAttach(sessionID string, streams *AttachStreams) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateCreated { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) + + // TODO: check logic here - should we set Privileged if the container is + // privileged? 
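ExecCreate above keeps the old "this really ought to be a do-while" retry when picking a unique session ID. Go's usual spelling of a do-while is an infinite for with a break, as in this small stand-alone sketch; newID and the map are placeholders for stringid.GenerateNonCryptoID and the container's session table:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newID returns a random 32-character hex ID, standing in for
// stringid.GenerateNonCryptoID in the real code.
func newID() string {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	return hex.EncodeToString(buf)
}

func main() {
	existing := map[string]bool{} // stand-in for c.state.ExecSessions
	var sessionID string
	for {
		sessionID = newID()
		if !existing[sessionID] {
			break // keep the first ID that does not collide
		}
	}
	fmt.Println("new exec session ID:", sessionID)
}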
+ var capList []string + if session.Config.Privileged || c.config.Privileged { + capList = capabilities.AllCapabilities() + } + + user := c.config.User + if session.Config.User != "" { + user = session.Config.User + } + + if err := c.createExecBundle(session.ID()); err != nil { + return err + } + + opts := new(ExecOptions) + opts.Cmd = session.Config.Command + opts.CapAdd = capList + opts.Env = session.Config.Environment + opts.Terminal = session.Config.Terminal + opts.Cwd = session.Config.WorkDir + opts.User = user + opts.Streams = streams + opts.PreserveFDs = session.Config.PreserveFDs + opts.DetachKeys = session.Config.DetachKeys + + pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts) + if err != nil { + return err + } + + c.newContainerEvent(events.Exec) + logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID()) + + var lastErr error + + // Update and save session to reflect PID/running + session.PID = pid + session.State = define.ExecStateRunning + + if err := c.save(); err != nil { + lastErr = err + } + + // Unlock so other processes can use the container + if !c.batched { + c.lock.Unlock() + } + + tmpErr := <-attachChan + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = tmpErr + + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + + // Lock again + if !c.batched { + c.lock.Lock() + } + + // Sync the container to pick up state changes + if err := c.syncContainer(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID()) + } + + // Update status + // Since we did a syncContainer, the old session has been overwritten. + // Grab a fresh one from the database. + session, ok = c.state.ExecSessions[sessionID] + if !ok { + // Exec session already removed. + logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID) + return nil + } + session.State = define.ExecStateStopped + session.ExitCode = exitCode + session.PID = 0 + + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + // Clean up after ourselves + if err := c.cleanupExecBundle(session.ID()); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} + +// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session. +func (c *Container) ExecHTTPStartAndAttach(sessionID string) error { + // Will be implemented in part 2, migrating Start. + return define.ErrNotImplemented +} + +// ExecStop stops an exec session in the container. +// If a timeout is provided, it will be used; otherwise, the timeout will +// default to the stop timeout of the container. +// Cleanup will be invoked automatically once the session is stopped. 
+func (c *Container) ExecStop(sessionID string, timeout *uint) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID()) + + finalTimeout := c.StopTimeout() + if timeout != nil { + finalTimeout = *timeout + } + + // Stop the session + if err := c.ociRuntime.ExecStopContainer(c, session.ID(), finalTimeout); err != nil { + return err + } + + var cleanupErr error + + // Retrieve exit code and update status + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + cleanupErr = err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr) + } + cleanupErr = err + } + + if err := c.cleanupExecBundle(session.ID()); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr) + } + cleanupErr = err + } + + return cleanupErr +} + +// ExecCleanup cleans up an exec session in the container, removing temporary +// files associated with it. +func (c *Container) ExecCleanup(sessionID string) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State == define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID()) + } + + logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID()) + + return c.cleanupExecBundle(session.ID()) +} + +// ExecRemove removes an exec session in the container. +// If force is given, the session will be stopped first if it is running. +func (c *Container) ExecRemove(sessionID string, force bool) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID()) + + // Update status of exec session if running, so we cna check if it + // stopped in the meantime. + if session.State == define.ExecStateRunning { + stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) + if err != nil { + return err + } + if stopped { + session.State = define.ExecStateStopped + // TODO: should we retrieve exit code here? + // TODO: Might be worth saving state here. 
+ } + } + + if session.State == define.ExecStateRunning { + if !force { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID()) + } + + // Stop the session + if err := c.ociRuntime.ExecStopContainer(c, session.ID(), c.StopTimeout()); err != nil { + return err + } + + if err := c.cleanupExecBundle(session.ID()); err != nil { + return err + } + } + + // First remove exec session from DB. + if err := c.runtime.state.RemoveExecSession(session); err != nil { + return err + } + // Next, remove it from the container and save state + delete(c.state.ExecSessions, sessionID) + if err := c.save(); err != nil { + return err + } + + logrus.Debugf("Successfully removed container %s exec session %s", c.ID(), session.ID()) + + return nil +} + +// ExecResize resizes the TTY of the given exec session. Only available if the +// exec session created a TTY. +func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalSize) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID()) + + if session.State != define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID()) + } + + return c.ociRuntime.ExecAttachResize(c, sessionID, newSize) +} + +// Exec emulates the old Libpod exec API, providing a single call to create, +// run, and remove an exec session. Returns exit code and error. Exit code is +// not guaranteed to be set sanely if error is not nil. +func (c *Container) Exec(config *ExecConfig, streams *AttachStreams, resize <-chan remotecommand.TerminalSize) (int, error) { + sessionID, err := c.ExecCreate(config) + if err != nil { + return -1, err + } + if err := c.ExecStartAndAttach(sessionID, streams); err != nil { + return -1, err + } + + // Start resizing if we have a resize channel. + // This goroutine may likely leak, given that we cannot close it here. + // Not a big deal, since it should run for as long as the Podman process + // does. Could be a big deal for `podman service` but we don't need this + // API there. + // TODO: Refactor so this is closed here, before we remove the exec + // session. + if resize != nil { + go func() { + for resizeRequest := range resize { + if err := c.ExecResize(sessionID, resizeRequest); err != nil { + // Assume the exec session went down. 
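The resize handling in Exec above is a plain channel-draining goroutine: it applies each terminal-size event until the channel is closed or a resize fails. A stripped-down stand-alone version of that shape, where terminalSize and applyResize stand in for the remotecommand type and the OCI runtime call:

package main

import (
	"fmt"
	"sync"
)

type terminalSize struct{ Width, Height uint16 }

func main() {
	resize := make(chan terminalSize)
	var wg sync.WaitGroup

	applyResize := func(sz terminalSize) error {
		fmt.Printf("resized to %dx%d\n", sz.Width, sz.Height)
		return nil
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		// Loop ends when the channel is closed or a resize fails.
		for sz := range resize {
			if err := applyResize(sz); err != nil {
				return
			}
		}
	}()

	resize <- terminalSize{Width: 80, Height: 24}
	resize <- terminalSize{Width: 120, Height: 40}
	close(resize)
	wg.Wait()
}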
+ logrus.Warnf("Error resizing exec session %s: %v", sessionID, err) + return + } + } + }() + } + + session, err := c.ExecSession(sessionID) + if err != nil { + return -1, err + } + exitCode := session.ExitCode + if err := c.ExecRemove(sessionID, false); err != nil { + return -1, err + } + + if exitCode != 0 { + return exitCode, errors.Wrapf(define.ErrOCIRuntime, "exec session exited with non-zero exit code %d", exitCode) + } + + return exitCode, nil +} + +// cleanup an exec session after its done +func (c *Container) cleanupExecBundle(sessionID string) error { + if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + + return c.ociRuntime.ExecContainerCleanup(c, sessionID) +} + +// the path to a containers exec session bundle +func (c *Container) execBundlePath(sessionID string) string { + return filepath.Join(c.bundlePath(), sessionID) +} + +// Get PID file path for a container's exec session +func (c *Container) execPidPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_pid") +} + +// the log path for an exec session +func (c *Container) execLogPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_log") +} + +// the socket conmon creates for an exec session +func (c *Container) execAttachSocketPath(sessionID string) (string, error) { + return c.ociRuntime.ExecAttachSocketPath(c, sessionID) +} + +// execExitFileDir gets the path to the container's exit file +func (c *Container) execExitFileDir(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exit") +} + +// execOCILog returns the file path for the exec sessions oci log +func (c *Container) execOCILog(sessionID string) string { + if !c.ociRuntime.SupportsJSONErrors() { + return "" + } + return filepath.Join(c.execBundlePath(sessionID), "oci-log") +} + +// create a bundle path and associated files for an exec session +func (c *Container) createExecBundle(sessionID string) (err error) { + bundlePath := c.execBundlePath(sessionID) + if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { + return createErr + } + defer func() { + if err != nil { + if err2 := os.RemoveAll(bundlePath); err != nil { + logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) + } + } + }() + if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { + // The directory is allowed to exist + if !os.IsExist(err2) { + err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) + } + } + return +} + +// readExecExitCode reads the exit file for an exec session and returns +// the exit code +func (c *Container) readExecExitCode(sessionID string) (int, error) { + exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) + chWait := make(chan error) + defer close(chWait) + + _, err := WaitForFile(exitFile, chWait, time.Second*5) + if err != nil { + return -1, err + } + ec, err := ioutil.ReadFile(exitFile) + if err != nil { + return -1, err + } + ecInt, err := strconv.Atoi(string(ec)) + if err != nil { + return -1, err + } + return ecInt, nil +} + +// getExecSessionPID gets the PID of an active exec session +func (c *Container) getExecSessionPID(sessionID string) (int, error) { + session, ok := c.state.ExecSessions[sessionID] + if ok { + return session.PID, nil + } + oldSession, ok := c.state.LegacyExecSessions[sessionID] + if ok { + return oldSession.PID, nil + } + + return -1, 
errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID()) +} + +// getKnownExecSessions gets a list of all exec sessions we think are running, +// but does not verify their current state. +// Please use getActiveExecSessions() outside of container_exec.go, as this +// function performs further checks to return an accurate list. +func (c *Container) getKnownExecSessions() []string { + knownSessions := []string{} + // First check legacy sessions. + // TODO: This is DEPRECATED and will be removed in a future major + // release. + for sessionID := range c.state.LegacyExecSessions { + knownSessions = append(knownSessions, sessionID) + } + // Next check new exec sessions, but only if in running state + for sessionID, session := range c.state.ExecSessions { + if session.State == define.ExecStateRunning { + knownSessions = append(knownSessions, sessionID) + } + } + + return knownSessions +} + +// getActiveExecSessions checks if there are any active exec sessions in the +// current container. Returns an array of active exec sessions. +// Will continue through errors where possible. +// Currently handles both new and legacy, deprecated exec sessions. +func (c *Container) getActiveExecSessions() ([]string, error) { + activeSessions := []string{} + knownSessions := c.getKnownExecSessions() + + // Instead of saving once per iteration, do it once at the end. + var lastErr error + needSave := false + for _, id := range knownSessions { + alive, err := c.ociRuntime.ExecUpdateStatus(c, id) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + if !alive { + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + _, isLegacy := c.state.LegacyExecSessions[id] + if isLegacy { + delete(c.state.LegacyExecSessions, id) + needSave = true + } else { + session := c.state.ExecSessions[id] + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + needSave = true + } + } else { + activeSessions = append(activeSessions, id) + } + } + if needSave { + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) + } + lastErr = err + } + } + + return activeSessions, lastErr +} + +// removeAllExecSessions stops and removes all the container's exec sessions +func (c *Container) removeAllExecSessions() error { + knownSessions := c.getKnownExecSessions() + + var lastErr error + for _, id := range knownSessions { + if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + } + // Delete all exec sessions + if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + 
c.state.ExecSessions = nil + c.state.LegacyExecSessions = nil + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} diff --git a/libpod/container_internal.go b/libpod/container_internal.go index a0805c1fa..12a13a0ce 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -142,92 +142,6 @@ func (c *Container) exitFilePath() (string, error) { return c.ociRuntime.ExitFilePath(c) } -// create a bundle path and associated files for an exec session -func (c *Container) createExecBundle(sessionID string) (err error) { - bundlePath := c.execBundlePath(sessionID) - if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { - return createErr - } - defer func() { - if err != nil { - if err2 := os.RemoveAll(bundlePath); err != nil { - logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) - } - } - }() - if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { - // The directory is allowed to exist - if !os.IsExist(err2) { - err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) - } - } - return -} - -// cleanup an exec session after its done -func (c *Container) cleanupExecBundle(sessionID string) error { - if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { - return err - } - - return c.ociRuntime.ExecContainerCleanup(c, sessionID) -} - -// the path to a containers exec session bundle -func (c *Container) execBundlePath(sessionID string) string { - return filepath.Join(c.bundlePath(), sessionID) -} - -// Get PID file path for a container's exec session -func (c *Container) execPidPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_pid") -} - -// the log path for an exec session -func (c *Container) execLogPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_log") -} - -// the socket conmon creates for an exec session -func (c *Container) execAttachSocketPath(sessionID string) (string, error) { - return c.ociRuntime.ExecAttachSocketPath(c, sessionID) -} - -// execExitFileDir gets the path to the container's exit file -func (c *Container) execExitFileDir(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exit") -} - -// execOCILog returns the file path for the exec sessions oci log -func (c *Container) execOCILog(sessionID string) string { - if !c.ociRuntime.SupportsJSONErrors() { - return "" - } - return filepath.Join(c.execBundlePath(sessionID), "oci-log") -} - -// readExecExitCode reads the exit file for an exec session and returns -// the exit code -func (c *Container) readExecExitCode(sessionID string) (int, error) { - exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) - chWait := make(chan error) - defer close(chWait) - - _, err := WaitForFile(exitFile, chWait, time.Second*5) - if err != nil { - return -1, err - } - ec, err := ioutil.ReadFile(exitFile) - if err != nil { - return -1, err - } - ecInt, err := strconv.Atoi(string(ec)) - if err != nil { - return -1, err - } - return ecInt, nil -} - // Wait for the container's exit file to appear. // When it does, update our state based on it. 
func (c *Container) waitForExitFileAndSync() error { @@ -568,6 +482,7 @@ func resetState(state *ContainerState) error { state.State = define.ContainerStateConfigured } state.ExecSessions = make(map[string]*ExecSession) + state.LegacyExecSessions = nil state.NetworkStatus = nil state.BindMounts = make(map[string]string) state.StoppedByUser = false @@ -1814,12 +1729,12 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } - // Reap exec sessions - if err := c.reapExecSessions(); err != nil { + // Check exec sessions + sessions, err := c.getActiveExecSessions() + if err != nil { return err } - - if len(c.state.ExecSessions) != 0 { + if len(sessions) != 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID()) } @@ -1926,41 +1841,6 @@ func (c *Container) checkExitFile() error { return c.handleExitFile(exitFile, info) } -// Reap dead exec sessions -func (c *Container) reapExecSessions() error { - // Instead of saving once per iteration, use a defer to do it once at - // the end. - var lastErr error - needSave := false - for id := range c.state.ExecSessions { - alive, err := c.ociRuntime.ExecUpdateStatus(c, id) - if err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - continue - } - if !alive { - // Clean up lingering files and remove the exec session - if err := c.ociRuntime.ExecContainerCleanup(c, id); err != nil { - return errors.Wrapf(err, "error cleaning up container %s exec session %s files", c.ID(), id) - } - delete(c.state.ExecSessions, id) - needSave = true - } - } - if needSave { - if err := c.save(); err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - } - } - return lastErr -} - func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool { if c.config.Spec == nil || c.config.Spec.Linux == nil { return false diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index 98edc340a..2a35a2ae9 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -134,7 +134,9 @@ func (c *Container) execPS(args []string) ([]string, error) { }() cmd := append([]string{"ps"}, args...) - ec, err := c.Exec(false, false, map[string]string{}, cmd, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = cmd + ec, err := c.Exec(config, streams, nil) if err != nil { return nil, err } else if ec != 0 { diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go index e7d258e21..6da49a594 100644 --- a/libpod/define/containerstate.go +++ b/libpod/define/containerstate.go @@ -78,3 +78,37 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) } } + +// ContainerExecStatus is the status of an exec session within a container. +type ContainerExecStatus int + +const ( + // ExecStateUnknown indicates that the state of the exec session is not + // known. 
+ ExecStateUnknown ContainerExecStatus = iota + // ExecStateCreated indicates that the exec session has been created but + // not yet started + ExecStateCreated ContainerExecStatus = iota + // ExecStateRunning indicates that the exec session has been started but + // has not yet exited. + ExecStateRunning ContainerExecStatus = iota + // ExecStateStopped indicates that the exec session has stopped and is + // no longer running. + ExecStateStopped ContainerExecStatus = iota +) + +// String returns a string representation of a given exec state. +func (s ContainerExecStatus) String() string { + switch s { + case ExecStateUnknown: + return "unknown" + case ExecStateCreated: + return "created" + case ExecStateRunning: + return "running" + case ExecStateStopped: + return "stopped" + default: + return "bad state" + } +} diff --git a/libpod/define/errors.go b/libpod/define/errors.go index b79cf08dc..3ba343789 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -20,6 +20,10 @@ var ( // ErrNoSuchVolume indicates the requested volume does not exist ErrNoSuchVolume = errors.New("no such volume") + // ErrNoSuchExecSession indicates that the requested exec session does + // not exist. + ErrNoSuchExecSession = errors.New("no such exec session") + // ErrCtrExists indicates a container with the same name or ID already // exists ErrCtrExists = errors.New("container already exists") @@ -29,10 +33,16 @@ var ( ErrImageExists = errors.New("image already exists") // ErrVolumeExists indicates a volume with the same name already exists ErrVolumeExists = errors.New("volume already exists") + // ErrExecSessionExists indicates an exec session with the same ID + // already exists. + ErrExecSessionExists = errors.New("exec session already exists") // ErrCtrStateInvalid indicates a container is in an improper state for // the requested operation ErrCtrStateInvalid = errors.New("container state improper") + // ErrExecSessionStateInvalid indicates that an exec session is in an + // improper state for the requested operation + ErrExecSessionStateInvalid = errors.New("exec session state improper") // ErrVolumeBeingUsed indicates that a volume is being used by at least one container ErrVolumeBeingUsed = errors.New("volume is being used") @@ -90,6 +100,9 @@ var ( // ErrVolumeRemoved indicates that the volume has already been removed and // no further operations can be performed on it ErrVolumeRemoved = errors.New("volume has already been removed") + // ErrExecSessionRemoved indicates that the exec session has already + // been removed and no further operations can be performed on it. 
+ ErrExecSessionRemoved = errors.New("exec session has already been removed") // ErrDBClosed indicates that the connection to the state database has // already been closed diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 9c274c4f3..76b7a1fcf 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -143,7 +143,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID()) timeStart := time.Now() hcResult := HealthCheckSuccess - _, hcErr := c.Exec(false, false, map[string]string{}, newCommand, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = newCommand + _, hcErr := c.Exec(config, streams, nil) if hcErr != nil { errCause := errors.Cause(hcErr) hcResult = HealthCheckFailure diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 2144671a5..ca562ab7e 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -20,10 +20,16 @@ type InMemoryState struct { pods map[string]*Pod // Maps container ID to container struct. containers map[string]*Container - volumes map[string]*Volume + // Maps volume ID to volume struct + volumes map[string]*Volume + // Maps exec session ID to ID of associated container + execSessions map[string]string // Maps container ID to a list of IDs of dependencies. - ctrDepends map[string][]string + ctrDepends map[string][]string + // Maps volume ID to IDs of dependencies volumeDepends map[string][]string + // Maps container ID to IDs of associated exec sessions. + ctrExecSessions map[string][]string // Maps pod ID to a map of container ID to container struct. podContainers map[string]map[string]*Container // Global name registry - ensures name uniqueness and performs lookups. 
@@ -51,10 +57,13 @@ func NewInMemoryState() (State, error) { state.pods = make(map[string]*Pod) state.containers = make(map[string]*Container) state.volumes = make(map[string]*Volume) + state.execSessions = make(map[string]string) state.ctrDepends = make(map[string][]string) state.volumeDepends = make(map[string][]string) + state.ctrExecSessions = make(map[string][]string) + state.podContainers = make(map[string]map[string]*Container) state.nameIndex = registrar.NewRegistrar() @@ -316,6 +325,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error { return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } + // Ensure we don't have active exec sessions + ctrSessions := s.ctrExecSessions[ctr.ID()] + if len(ctrSessions) > 0 { + sessStr := strings.Join(ctrSessions, ", ") + return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr) + } + if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID()) @@ -437,6 +453,117 @@ func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error) return ctr.Config(), nil } +// Add an exec session to the database +func (s *InMemoryState) AddExecSession(ctr *Container, session *ExecSession) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + if session.ContainerID() != ctr.ID() { + return errors.Wrapf(define.ErrInvalidArg, "container ID and exec session ID must match") + } + if _, ok := s.containers[ctr.ID()]; !ok { + return define.ErrNoSuchCtr + } + + if _, ok := s.execSessions[session.ID()]; ok { + return define.ErrExecSessionExists + } + + s.execSessions[session.ID()] = ctr.ID() + + ctrSessions, ok := s.ctrExecSessions[ctr.ID()] + if !ok { + ctrSessions = []string{} + } + + ctrSessions = append(ctrSessions, session.ID()) + s.ctrExecSessions[ctr.ID()] = ctrSessions + + return nil +} + +// Get an exec session from the database by full or partial ID. +func (s *InMemoryState) GetExecSession(id string) (string, error) { + if id == "" { + return "", define.ErrEmptyID + } + + session, ok := s.execSessions[id] + if !ok { + return "", define.ErrNoSuchExecSession + } + + return session, nil +} + +// RemoveExecSession removes an exec session from the database. +func (s *InMemoryState) RemoveExecSession(session *ExecSession) error { + if _, ok := s.execSessions[session.ID()]; !ok { + return define.ErrNoSuchExecSession + } + + ctrSessions, ok := s.ctrExecSessions[session.ContainerID()] + // If !ok - internal state seems inconsistent, but the thing we wanted + // to remove is gone. Continue. + if ok { + newSessions := []string{} + for _, sess := range ctrSessions { + if sess != session.ID() { + newSessions = append(newSessions, sess) + } + } + s.ctrExecSessions[session.ContainerID()] = newSessions + } + + delete(s.execSessions, session.ID()) + + return nil +} + +// GetContainerExecSessions retrieves all exec sessions for the given container. +func (s *InMemoryState) GetContainerExecSessions(ctr *Container) ([]string, error) { + if !ctr.valid { + return nil, define.ErrCtrRemoved + } + if _, ok := s.containers[ctr.ID()]; !ok { + ctr.valid = false + return nil, define.ErrNoSuchCtr + } + + ctrSessions := s.ctrExecSessions[ctr.ID()] + + return ctrSessions, nil +} + +// RemoveContainerExecSessions removes all exec sessions for the given +// container. 
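An editorial aside before the implementation that follows: the in-memory state tracks exec sessions in two maps kept in lockstep, execSessions (session ID to container ID) and ctrExecSessions (container ID to its session IDs), so both lookup directions are O(1) and every removal has to touch both maps. A minimal standalone sketch of that paired-map invariant, using illustrative names rather than the libpod API:

    package main

    import "fmt"

    // registry mirrors InMemoryState's paired exec-session maps.
    type registry struct {
    	sessionToCtr  map[string]string   // exec session ID -> container ID
    	ctrToSessions map[string][]string // container ID -> exec session IDs
    }

    func newRegistry() *registry {
    	return &registry{
    		sessionToCtr:  map[string]string{},
    		ctrToSessions: map[string][]string{},
    	}
    }

    // add updates both directions together; updating only one would
    // leave the registry inconsistent.
    func (r *registry) add(ctrID, sessID string) {
    	r.sessionToCtr[sessID] = ctrID
    	r.ctrToSessions[ctrID] = append(r.ctrToSessions[ctrID], sessID)
    }

    // removeCtr drops a container and every session pointing at it,
    // the same shape as RemoveContainerExecSessions below.
    func (r *registry) removeCtr(ctrID string) {
    	for _, sess := range r.ctrToSessions[ctrID] {
    		delete(r.sessionToCtr, sess)
    	}
    	delete(r.ctrToSessions, ctrID)
    }

    func main() {
    	r := newRegistry()
    	r.add("ctr1", "sessA")
    	r.removeCtr("ctr1")
    	fmt.Println(len(r.sessionToCtr), len(r.ctrToSessions)) // 0 0
    }

RemoveContainerExecSessions below is the same shape, plus an internal-consistency error when the two maps disagree.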
+func (s *InMemoryState) RemoveContainerExecSessions(ctr *Container) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + if _, ok := s.containers[ctr.ID()]; !ok { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrSessions, ok := s.ctrExecSessions[ctr.ID()] + if !ok { + return nil + } + + for _, sess := range ctrSessions { + if _, ok := s.execSessions[sess]; !ok { + // We have an internal state inconsistency + // Error out + return errors.Wrapf(define.ErrInternal, "inconsistent database state: exec session %s is missing", sess) + } + delete(s.execSessions, sess) + } + delete(s.ctrExecSessions, ctr.ID()) + + return nil +} + // RewriteContainerConfig rewrites a container's configuration. // This function is DANGEROUS, even with an in-memory state. // Please read the full comment on it in state.go before using it. @@ -1056,6 +1183,13 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } + // Ensure we don't have active exec sessions + ctrSessions := s.ctrExecSessions[ctr.ID()] + if len(ctrSessions) > 0 { + sessStr := strings.Join(ctrSessions, ", ") + return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr) + } + // Retrieve pod containers podCtrs, ok := s.podContainers[pod.ID()] if !ok { diff --git a/libpod/oci.go b/libpod/oci.go index 27edebefc..ef46cf5c3 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -71,6 +71,9 @@ type OCIRuntime interface { // Returns an int (exit code), error channel (errors from attach), and // error (errors that occurred attempting to start the exec session). ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) + // ExecAttachResize resizes the terminal of a running exec session. Only + // allowed with sessions that were created with a TTY. + ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error // ExecStopContainer stops a given exec session in a running container. // SIGTERM with be sent initially, then SIGKILL after the given timeout. // If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will @@ -143,12 +146,12 @@ type ExecOptions struct { // to 0, 1, 2) that will be passed to the executed process. The total FDs // passed will be 3 + PreserveFDs. PreserveFDs uint - // Resize is a channel where terminal resize events are sent to be - // handled. - Resize chan remotecommand.TerminalSize // DetachKeys is a set of keys that, when pressed in sequence, will // detach from the container. - DetachKeys string + // If not provided, the default keys will be used. + // If provided but set to "", detaching from the container will be + // disabled. + DetachKeys *string } // HTTPAttachStreams informs the HTTPAttach endpoint which of the container's diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index 46c70e7eb..5fc46c31c 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -93,7 +93,7 @@ func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan re // 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go // 5. 
child receives on startFd, runs the runtime exec command // attachToExec is responsible for closing startFd and attachFd -func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd, attachFd *os.File) error { +func (c *Container) attachToExec(streams *AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File) error { if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput { return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") } @@ -104,7 +104,11 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c defer errorhandling.CloseQuiet(startFd) defer errorhandling.CloseQuiet(attachFd) - detachKeys, err := processDetachKeys(keys) + detachString := define.DefaultDetachKeys + if keys != nil { + detachString = *keys + } + detachKeys, err := processDetachKeys(detachString) if err != nil { return err } @@ -134,10 +138,6 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c } }() - // Register the resize func after we've read the attach socket, as we know at this point the - // 'ctl' file has been created in conmon - registerResizeFunc(resize, c.execBundlePath(sessionID)) - // start listening on stdio of the process receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys) diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index d3c3bbcc5..82c5d7020 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -769,7 +769,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options attachChan := make(chan error) go func() { // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe) + attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe) close(attachChan) }() attachToExecCalled = true @@ -783,37 +783,54 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options return pid, attachChan, err } +// ExecAttachResize resizes the TTY of the given exec session. +func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + // TODO: probably want a dedicated function to get ctl file path? + controlPath := filepath.Join(ctr.execBundlePath(sessionID), "ctl") + controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0) + if err != nil { + return errors.Wrapf(err, "could not open ctl file for terminal resize for container %s exec session %s", ctr.ID(), sessionID) + } + defer controlFile.Close() + + logrus.Debugf("Received a resize event for container %s exec session %s: %+v", ctr.ID(), sessionID, newSize) + if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil { + return errors.Wrapf(err, "failed to write to ctl file to resize terminal") + } + + return nil +} + // ExecStopContainer stops a given exec session in a running container. 
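An aside before ExecStopContainer itself: the rewritten implementation resolves the session PID via getExecSessionPID, then escalates from SIGTERM to SIGKILL when a timeout is given. A condensed standalone sketch of that escalation against a bare PID; waitPidStop here is a stand-in with assumed polling behavior, not libpod's helper of the same name:

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/sys/unix"
    )

    // waitPidStop polls with signal 0 until the PID disappears or the
    // window expires (signal 0 checks existence without delivering anything).
    func waitPidStop(pid int, d time.Duration) error {
    	for deadline := time.Now().Add(d); time.Now().Before(deadline); {
    		if err := unix.Kill(pid, 0); err == unix.ESRCH {
    			return nil
    		}
    		time.Sleep(100 * time.Millisecond)
    	}
    	return fmt.Errorf("pid %d still alive after %v", pid, d)
    }

    // stopPid mirrors the SIGTERM-then-SIGKILL shape of the code below.
    func stopPid(pid int, timeout time.Duration) error {
    	if err := unix.Kill(pid, unix.SIGTERM); err != nil {
    		if err == unix.ESRCH {
    			return nil // already gone
    		}
    		return err
    	}
    	if waitPidStop(pid, timeout) == nil {
    		return nil
    	}
    	// SIGTERM did not work; SIGKILL cannot be caught or ignored.
    	if err := unix.Kill(pid, unix.SIGKILL); err != nil && err != unix.ESRCH {
    		return err
    	}
    	return waitPidStop(pid, timeout)
    }

    func main() { fmt.Println(stopPid(1<<22, time.Second)) }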
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
-	session, ok := ctr.state.ExecSessions[sessionID]
-	if !ok {
-		// TODO This should probably be a separate error
-		return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
+	pid, err := ctr.getExecSessionPID(sessionID)
+	if err != nil {
+		return err
	}

	logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)

	// Is the session dead?
	// Ping the PID with signal 0 to see if it still exists.
-	if err := unix.Kill(session.PID, 0); err != nil {
+	if err := unix.Kill(pid, 0); err != nil {
		if err == unix.ESRCH {
			return nil
		}
-		return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
+		return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
	}

	if timeout > 0 {
		// Use SIGTERM by default, then SIGKILL after the timeout.
-		logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID())
-		if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+		logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
+		if err := unix.Kill(pid, unix.SIGTERM); err != nil {
			if err == unix.ESRCH {
				return nil
			}
-			return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID)
+			return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
		}

		// Wait for the PID to stop
-		if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil {
+		if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
			logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
		} else {
			// No error, container is dead
@@ -822,17 +839,17 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
	}

	// SIGTERM did not work. On to SIGKILL.
-	logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID())
-	if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+	logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
+	if err := unix.Kill(pid, unix.SIGKILL); err != nil {
		if err == unix.ESRCH {
			return nil
		}
-		return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID)
+		return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
	}

	// Wait for the PID to stop
-	if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil {
-		return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID)
+	if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
+		return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
	}

	return nil
@@ -840,21 +857,20 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
// ExecUpdateStatus checks if the given exec session is still running.
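ExecUpdateStatus, next, leans on the classic kill(pid, 0) probe: signal 0 delivers nothing, but the kernel still performs the existence and permission checks, so ESRCH means the process is gone. A standalone sketch of just that probe (note that EPERM also proves existence, a detail worth remembering when probing processes owned by other users):

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    // pidAlive reports whether a process exists, using the same
    // signal-0 probe as ExecUpdateStatus below.
    func pidAlive(pid int) (bool, error) {
    	switch err := unix.Kill(pid, 0); err {
    	case nil:
    		return true, nil
    	case unix.ESRCH:
    		return false, nil
    	case unix.EPERM:
    		return true, nil // exists, but owned by someone else
    	default:
    		return false, err
    	}
    }

    func main() {
    	alive, _ := pidAlive(os.Getpid())
    	fmt.Println(alive) // true: our own PID certainly exists
    }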
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) { - session, ok := ctr.state.ExecSessions[sessionID] - if !ok { - // TODO This should probably be a separate error - return false, errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID()) + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return false, err } logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID) // Is the session dead? // Ping the PID with signal 0 to see if it still exists. - if err := unix.Kill(session.PID, 0); err != nil { + if err := unix.Kill(pid, 0); err != nil { if err == unix.ESRCH { return false, nil } - return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID) + return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) } return true, nil diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go index ff7eea625..326591d89 100644 --- a/libpod/oci_missing.go +++ b/libpod/oci_missing.go @@ -125,6 +125,11 @@ func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options return -1, nil, r.printError() } +// ExecAttachResize is not available as the runtime is missing. +func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + return r.printError() +} + // ExecStopContainer is not available as the runtime is missing. // TODO: We can also investigate using unix.Kill() on the PID of the exec // session here if we want to make stopping containers possible. Won't be diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 1f1cdc271..0b18436ca 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -463,11 +463,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Check that all of our exec sessions have finished - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } + // Remove all active exec sessions + if err := c.removeAllExecSessions(); err != nil { + return err } // Check that no other containers depend on the container. @@ -484,9 +482,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Set ContainerStateRemoving and remove exec sessions + // Set ContainerStateRemoving c.state.State = define.ContainerStateRemoving - c.state.ExecSessions = nil if err := c.save(); err != nil { return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go index e1dc31391..be566e211 100644 --- a/libpod/runtime_pod.go +++ b/libpod/runtime_pod.go @@ -90,18 +90,10 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { // output. 
Multiple filters are handled by ANDing their output, so only pods // matching all filters are returned func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, define.ErrRuntimeStopped - } - - pods, err := r.state.AllPods() + pods, err := r.GetAllPods() if err != nil { return nil, err } - podsFiltered := make([]*Pod, 0, len(pods)) for _, pod := range pods { include := true diff --git a/libpod/state.go b/libpod/state.go index b246b5eac..9690e5819 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -72,6 +72,8 @@ type State interface { // Removes container from state. // Containers that are part of pods must use RemoveContainerFromPod. // The container must be part of the set namespace. + // All dependencies must be removed first. + // All exec sessions referencing the container must be removed first. RemoveContainer(ctr *Container) error // UpdateContainer updates a container's state from the backing store. // The container must be part of the set namespace. @@ -95,6 +97,30 @@ type State interface { // Return a container config from the database by full ID GetContainerConfig(id string) (*ContainerConfig, error) + // Add creates a reference to an exec session in the database. + // The container the exec session is attached to will be recorded. + // The container state will not be modified. + // The actual exec session itself is part of the container's state. + // We assume higher-level callers will add the session by saving the + // container's state before calling this. This only ensures that the ID + // of the exec session is associated with the ID of the container. + // Implementations may, but are not required to, verify that the state + // of the given container has an exec session with the ID given. + AddExecSession(ctr *Container, session *ExecSession) error + // Get retrieves the container a given exec session is attached to. + GetExecSession(id string) (string, error) + // Remove a reference to an exec session from the database. + // This will not modify container state to remove the exec session there + // and instead only removes the session ID -> container ID reference + // added by AddExecSession. + RemoveExecSession(session *ExecSession) error + // Get the IDs of all exec sessions attached to a given container. + GetContainerExecSessions(ctr *Container) ([]string, error) + // Remove all exec sessions for a single container. + // Usually used as part of removing the container. + // As with RemoveExecSession, container state will not be modified. + RemoveContainerExecSessions(ctr *Container) error + // PLEASE READ FULL DESCRIPTION BEFORE USING. // Rewrite a container's configuration. // This function breaks libpod's normal prohibition on a read-only diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go index dc856cc8d..1417bd2b9 100644 --- a/pkg/adapter/pods.go +++ b/pkg/adapter/pods.go @@ -122,19 +122,31 @@ func (r *LocalRuntime) GetLatestPod() (*Pod, error) { return &pod, err } +// GetPodsWithFilters gets the filtered list of pods based on the filter parameters provided. 
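A hedged usage sketch for the adapter method that follows; constructing a LocalRuntime is elided, and the single name=value filter string is an assumption based on how the HTTP handler added in this patch flattens its filter map (the full multi-filter syntax is whatever shared.GetPodsWithFilters accepts):

    package main

    import (
    	"fmt"

    	"github.com/containers/libpod/pkg/adapter"
    )

    // listMatching is a hypothetical caller of the new adapter method.
    func listMatching(r *adapter.LocalRuntime) error {
    	pods, err := r.GetPodsWithFilters("name=newpod2")
    	if err != nil {
    		return err
    	}
    	for _, p := range pods {
    		// adapter.Pod wraps *libpod.Pod, so ID() is promoted.
    		fmt.Println(p.ID())
    	}
    	return nil
    }

    func main() {} // wiring up a real LocalRuntime is out of scope for this sketch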
+func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) {
+	pods, err := shared.GetPodsWithFilters(r.Runtime, filters)
+	if err != nil {
+		return nil, err
+	}
+	return r.podsToAdapterPods(pods)
+}
+
+func (r *LocalRuntime) podsToAdapterPods(pod []*libpod.Pod) ([]*Pod, error) {
+	var pods []*Pod
+	for _, i := range pod {
+		pods = append(pods, &Pod{i})
+	}
+	return pods, nil
+}
+
 // GetAllPods gets all pods and wraps it in an adapter pod
 func (r *LocalRuntime) GetAllPods() ([]*Pod, error) {
-	var pods []*Pod
 	allPods, err := r.Runtime.GetAllPods()
 	if err != nil {
 		return nil, err
 	}
-	for _, p := range allPods {
-		pod := Pod{}
-		pod.Pod = p
-		pods = append(pods, &pod)
-	}
-	return pods, nil
+	return r.podsToAdapterPods(allPods)
 }
 
 // LookupPod gets a pod by name or id and wraps it in an adapter pod
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index 20f089628..6b8f22f15 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -10,7 +10,7 @@ import (
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
-	"github.com/containers/libpod/cmd/podman/varlink"
+	iopodman "github.com/containers/libpod/cmd/podman/varlink"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/pkg/varlinkapi"
@@ -208,6 +208,11 @@ func (r *LocalRuntime) GetAllPods() ([]*Pod, error) {
 	return pods, nil
 }
 
+// This is an empty implementation: the remote client has not yet implemented pod filtering
+func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) {
+	return nil, define.ErrNotImplemented
+}
+
 // GetPodsByStatus returns a slice of pods filtered by a libpod status
 func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) {
 	podIDs, err := iopodman.GetPodsByStatus().Call(r.Conn, statuses)
diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go
index 3dc5864e2..ef5a6f926 100644
--- a/pkg/adapter/terminal_linux.go
+++ b/pkg/adapter/terminal_linux.go
@@ -16,7 +16,6 @@ import (
 // ExecAttachCtr execs and attaches to a container
 func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
 	resize := make(chan remotecommand.TerminalSize)
-
 	haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
 
 	// Check if we are attached to a terminal.
If we are, generate resize @@ -33,7 +32,18 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b } }() } - return ctr.Exec(tty, privileged, env, cmd, user, workDir, streams, preserveFDs, resize, detachKeys) + + execConfig := new(libpod.ExecConfig) + execConfig.Command = cmd + execConfig.Terminal = tty + execConfig.Privileged = privileged + execConfig.Environment = env + execConfig.User = user + execConfig.WorkDir = workDir + execConfig.DetachKeys = &detachKeys + execConfig.PreserveFDs = preserveFDs + + return ctr.Exec(execConfig, streams, resize) } // StartAttachCtr starts and (if required) attaches to a container diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index 74907f0f7..3a269fe50 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -87,7 +87,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - if _, found := r.URL.Query()["limit"]; found { + if _, found := r.URL.Query()["limit"]; found && query.Limit != -1 { last := query.Limit if len(containers) > last { containers = containers[len(containers)-last:] diff --git a/pkg/api/handlers/compat/images_history.go b/pkg/api/handlers/compat/images_history.go index 04304caa4..afadf4c48 100644 --- a/pkg/api/handlers/compat/images_history.go +++ b/pkg/api/handlers/compat/images_history.go @@ -28,7 +28,7 @@ func HistoryImage(w http.ResponseWriter, r *http.Request) { for _, h := range history { l := handlers.HistoryResponse{ ID: h.ID, - Created: h.Created.UnixNano(), + Created: h.Created.Unix(), CreatedBy: h.CreatedBy, Tags: h.Tags, Size: h.Size, diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go index f93c8f8d5..27ec64d89 100644 --- a/pkg/api/handlers/libpod/pods.go +++ b/pkg/api/handlers/libpod/pods.go @@ -103,7 +103,6 @@ func PodCreate(w http.ResponseWriter, r *http.Request) { func Pods(w http.ResponseWriter, r *http.Request) { var ( - runtime = r.Context().Value("runtime").(*libpod.Runtime) podInspectData []*libpod.PodInspect ) decoder := r.Context().Value("decoder").(*schema.Decoder) @@ -118,12 +117,8 @@ func Pods(w http.ResponseWriter, r *http.Request) { return } - if len(query.Filters) > 0 { - utils.Error(w, "filters are not implemented yet", http.StatusInternalServerError, define.ErrNotImplemented) - return - } + pods, err := utils.GetPods(w, r) - pods, err := runtime.GetAllPods() if err != nil { utils.Error(w, "Something went wrong", http.StatusInternalServerError, err) return diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go index d5a79bdc8..bbe4cee3c 100644 --- a/pkg/api/handlers/utils/containers.go +++ b/pkg/api/handlers/utils/containers.go @@ -16,7 +16,7 @@ import ( // ContainerCreateResponse is the response struct for creating a container type ContainerCreateResponse struct { // ID of the container created - ID string `json:"id"` + ID string `json:"Id"` // Warnings during container creation Warnings []string `json:"Warnings"` } diff --git a/pkg/api/handlers/utils/pods.go b/pkg/api/handlers/utils/pods.go new file mode 100644 index 000000000..266ad9a4b --- /dev/null +++ b/pkg/api/handlers/utils/pods.go @@ -0,0 +1,45 @@ +package utils + +import ( + "fmt" + "net/http" + + "github.com/containers/libpod/cmd/podman/shared" + "github.com/containers/libpod/libpod" + "github.com/gorilla/schema" +) + +func GetPods(w http.ResponseWriter, r *http.Request) ([]*libpod.Pod, error) { + runtime := 
r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + + query := struct { + All bool + Filters map[string][]string `schema:"filters"` + Digests bool + }{} + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + return nil, err + } + var filters = []string{} + if _, found := r.URL.Query()["digests"]; found && query.Digests { + UnSupportedParameter("digests") + } + + if len(query.Filters) > 0 { + for k, v := range query.Filters { + for _, val := range v { + filters = append(filters, fmt.Sprintf("%s=%s", k, val)) + } + } + filterFuncs, err := shared.GenerateFilterFunction(runtime, filters) + if err != nil { + return nil, err + } + return shared.FilterAllPodsWithFilterFunc(runtime, filterFuncs...) + } + + return runtime.GetAllPods() + +} diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go index ba5f9c3aa..289debd8c 100644 --- a/pkg/bindings/connection.go +++ b/pkg/bindings/connection.go @@ -109,7 +109,7 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context } client, err = tcpClient(_url) default: - return nil, errors.Errorf("%s is not a support schema", _url.Scheme) + return nil, errors.Errorf("'%s' is not a supported schema", _url.Scheme) } if err != nil { return nil, errors.Wrapf(err, "Failed to create %sClient", _url.Scheme) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index ad0a574d3..fae1deca4 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -75,15 +75,66 @@ var _ = Describe("Podman pods", func() { } Expect(StringInSlice(newpod, names)).To(BeTrue()) Expect(StringInSlice("newpod2", names)).To(BeTrue()) + }) + + // The test validates the list pod endpoint with passing filters as the params. 
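Before the test below, a trimmed sketch of the same filtered-list call outside the Ginkgo harness; the service URI is an assumption, and the filter map has exactly the shape the test constructs:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containers/libpod/pkg/bindings"
    	"github.com/containers/libpod/pkg/bindings/pods"
    )

    func main() {
    	// Assumes a podman service is listening on this socket.
    	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
    	if err != nil {
    		panic(err)
    	}
    	// Same shape the test uses: filter name to candidate values.
    	filters := map[string][]string{"name": {"newpod2"}}
    	list, err := pods.List(conn, filters)
    	if err != nil {
    		panic(err)
    	}
    	for _, p := range list {
    		fmt.Println(p.Config.Name)
    	}
    }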
+ It("List pods with filters", func() { + var newpod2 string = "newpod2" + bt.Podcreate(&newpod2) + _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) + Expect(err).To(BeNil()) + + // Expected err with invalid filter params + filters := make(map[string][]string) + filters["dummy"] = []string{"dummy"} + filteredPods, err := pods.List(bt.conn, filters) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + + // Expected empty response with invalid filters + filters = make(map[string][]string) + filters["name"] = []string{"dummy"} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 0)) + + // Validate list pod with name filter + filters = make(map[string][]string) + filters["name"] = []string{newpod2} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + var names []string + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod2", names)).To(BeTrue()) + + // Validate list pod with id filter + filters = make(map[string][]string) + response, err := pods.Inspect(bt.conn, newpod) + id := response.Config.ID + filters["id"] = []string{id} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + names = names[:0] + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod", names)).To(BeTrue()) - // TODO not working Because: code to list based on filter - // "not yet implemented", - // Validate list pod with filters - //filters := make(map[string][]string) - //filters["name"] = []string{newpod} - //filteredPods, err := pods.List(bt.conn, filters) - //Expect(err).To(BeNil()) - //Expect(len(filteredPods)).To(BeNumerically("==", 1)) + // Using multiple filters + filters["name"] = []string{newpod} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + names = names[:0] + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod", names)).To(BeTrue()) }) // The test validates if the exists responds diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go new file mode 100644 index 000000000..1899b98f7 --- /dev/null +++ b/pkg/domain/entities/containers.go @@ -0,0 +1 @@ +package entities diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go new file mode 100644 index 000000000..a1096f1f1 --- /dev/null +++ b/pkg/domain/entities/engine.go @@ -0,0 +1,97 @@ +package entities + +import ( + "net/url" + "os/user" + "path/filepath" + + "github.com/containers/libpod/libpod/define" + "github.com/spf13/pflag" +) + +type EngineMode string + +const ( + ABIMode = EngineMode("abi") + TunnelMode = EngineMode("tunnel") +) + +func (m EngineMode) String() string { + return string(m) +} + +type EngineOptions struct { + Uri *url.URL + Identities []string + FlagSet pflag.FlagSet + Flags EngineFlags +} + +type EngineFlags struct { + CGroupManager string + CniConfigDir string + ConmonPath string + DefaultMountsFile string + EventsBackend string + HooksDir []string + MaxWorks int + Namespace string + Root string + Runroot string + Runtime string + StorageDriver string + StorageOpts []string + Syslog bool + Trace bool + NetworkCmdPath string + 
+	Config string
+	CpuProfile string
+	LogLevel string
+	TmpDir string
+
+	RemoteUserName string
+	RemoteHost string
+	VarlinkAddress string
+	ConnectionName string
+	RemoteConfigFilePath string
+	Port int
+	IdentityFile string
+	IgnoreHosts bool
+
+	EngineMode EngineMode
+}
+
+func NewEngineOptions() (EngineFlags, error) {
+	u, _ := user.Current()
+	return EngineFlags{
+		CGroupManager: define.SystemdCgroupsManager,
+		CniConfigDir: "",
+		Config: "",
+		ConmonPath: filepath.Join("usr", "bin", "conmon"),
+		ConnectionName: "",
+		CpuProfile: "",
+		DefaultMountsFile: "",
+		EventsBackend: "",
+		HooksDir: nil,
+		IdentityFile: "",
+		IgnoreHosts: false,
+		LogLevel: "",
+		MaxWorks: 0,
+		Namespace: "",
+		NetworkCmdPath: "",
+		Port: 0,
+		RemoteConfigFilePath: "",
+		RemoteHost: "",
+		RemoteUserName: "",
+		Root: "",
+		Runroot: filepath.Join("run", "user", u.Uid),
+		Runtime: "",
+		StorageDriver: "overlayfs",
+		StorageOpts: nil,
+		Syslog: false,
+		TmpDir: filepath.Join("run", "user", u.Uid, "libpod", "tmp"),
+		Trace: false,
+		VarlinkAddress: "",
+	}, nil
+}
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
new file mode 100644
index 000000000..d08f37d44
--- /dev/null
+++ b/pkg/domain/entities/engine_container.go
@@ -0,0 +1,26 @@
+package entities
+
+import (
+	"context"
+)
+
+type ContainerEngine interface {
+	ContainerRuntime
+	PodRuntime
+	VolumeRuntime
+}
+
+type ContainerRuntime interface {
+	ContainerDelete(ctx context.Context, opts ContainerDeleteOptions) (*ContainerDeleteReport, error)
+	ContainerPrune(ctx context.Context) (*ContainerPruneReport, error)
+}
+
+type PodRuntime interface {
+	PodDelete(ctx context.Context, opts PodPruneOptions) (*PodDeleteReport, error)
+	PodPrune(ctx context.Context) (*PodPruneReport, error)
+}
+
+type VolumeRuntime interface {
+	VolumeDelete(ctx context.Context, opts VolumeDeleteOptions) (*VolumeDeleteReport, error)
+	VolumePrune(ctx context.Context) (*VolumePruneReport, error)
+}
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
new file mode 100644
index 000000000..27676d781
--- /dev/null
+++ b/pkg/domain/entities/engine_image.go
@@ -0,0 +1,12 @@
+package entities
+
+import (
+	"context"
+)
+
+type ImageEngine interface {
+	Delete(ctx context.Context, nameOrId string, opts ImageDeleteOptions) (*ImageDeleteReport, error)
+	History(ctx context.Context, nameOrId string, opts ImageHistoryOptions) (*ImageHistoryReport, error)
+	List(ctx context.Context, opts ImageListOptions) (*ImageListReport, error)
+	Prune(ctx context.Context, opts ImagePruneOptions) (*ImagePruneReport, error)
+}
diff --git a/pkg/domain/entities/filters.go b/pkg/domain/entities/filters.go
new file mode 100644
index 000000000..c7e227244
--- /dev/null
+++ b/pkg/domain/entities/filters.go
@@ -0,0 +1,150 @@
+package entities
+
+import (
+	"net/url"
+	"strings"
+)
+
+// Identifier interface allows filters to access ID() of object
+type Identifier interface {
+	Id() string
+}
+
+// Named interface allows filters to access Name() of object
+type Named interface {
+	Name() string
+}
+
+// Names interface allows filters to access Names() of object
+type Names interface {
+	Names() []string
+}
+
+// IdOrNamed interface allows filters to access ID() or Name() of object
+type IdOrNamed interface {
+	Identifier
+	Named
+}
+
+// IdOrNames interface allows filters to access ID() or Names() of object
+type IdOrNames interface {
+	Identifier
+	Names
+}
+
+type ImageFilter func(Image) bool
+type VolumeFilter func(Volume) bool
+type
ContainerFilter func(Container) bool + +func CompileImageFilters(filters url.Values) ImageFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIdFn(targets)) + case "name": + fns = append(fns, FilterNamesFn(targets)) + case "idOrName": + fns = append(fns, FilterIdOrNameFn(targets)) + } + } + + return func(image Image) bool { + for _, fn := range fns { + if !fn.(ImageFilter)(image) { + return false + } + } + return true + } +} + +func CompileContainerFilters(filters url.Values) ContainerFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIdFn(targets)) + case "name": + fns = append(fns, FilterNameFn(targets)) + case "idOrName": + fns = append(fns, FilterIdOrNameFn(targets)) + } + } + + return func(ctnr Container) bool { + for _, fn := range fns { + if !fn.(ContainerFilter)(ctnr) { + return false + } + } + return true + } +} + +func CompileVolumeFilters(filters url.Values) VolumeFilter { + var fns []interface{} + + for name, targets := range filters { + if name == "id" { + fns = append(fns, FilterIdFn(targets)) + } + } + + return func(volume Volume) bool { + for _, fn := range fns { + if !fn.(VolumeFilter)(volume) { + return false + } + } + return true + } +} + +func FilterIdFn(id []string) func(Identifier) bool { + return func(obj Identifier) bool { + for _, v := range id { + if strings.Contains(obj.Id(), v) { + return true + } + } + return false + } +} + +func FilterNameFn(name []string) func(Named) bool { + return func(obj Named) bool { + for _, v := range name { + if strings.Contains(obj.Name(), v) { + return true + } + } + return false + } +} + +func FilterNamesFn(name []string) func(Names) bool { + return func(obj Names) bool { + for _, v := range name { + for _, n := range obj.Names() { + if strings.Contains(n, v) { + return true + } + } + } + return false + } +} + +func FilterIdOrNameFn(id []string) func(IdOrNamed) bool { + return func(obj IdOrNamed) bool { + for _, v := range id { + if strings.Contains(obj.Id(), v) || strings.Contains(obj.Name(), v) { + return true + } + } + return false + } +} diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go new file mode 100644 index 000000000..c84ed5351 --- /dev/null +++ b/pkg/domain/entities/images.go @@ -0,0 +1,151 @@ +package entities + +import ( + "net/url" + + "github.com/containers/image/v5/manifest" + docker "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Image struct { + IdOrNamed + ID string `json:"Id"` + RepoTags []string `json:",omitempty"` + RepoDigests []string `json:",omitempty"` + Parent string `json:",omitempty"` + Comment string `json:",omitempty"` + Created string `json:",omitempty"` + Container string `json:",omitempty"` + ContainerConfig *container.Config `json:",omitempty"` + DockerVersion string `json:",omitempty"` + Author string `json:",omitempty"` + Config *container.Config `json:",omitempty"` + Architecture string `json:",omitempty"` + Variant string `json:",omitempty"` + Os string `json:",omitempty"` + OsVersion string `json:",omitempty"` + Size int64 `json:",omitempty"` + VirtualSize int64 `json:",omitempty"` + GraphDriver docker.GraphDriverData `json:",omitempty"` + RootFS docker.RootFS `json:",omitempty"` + Metadata docker.ImageMetadata `json:",omitempty"` + + // Podman extensions + Digest 
digest.Digest `json:",omitempty"`
+	PodmanVersion string `json:",omitempty"`
+	ManifestType string `json:",omitempty"`
+	User string `json:",omitempty"`
+	History []v1.History `json:",omitempty"`
+	NamesHistory []string `json:",omitempty"`
+	HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"`
+}
+
+func (i *Image) Id() string {
+	return i.ID
+}
+
+type ImageSummary struct {
+	Identifier
+	ID string `json:"Id"`
+	ParentId string `json:",omitempty"`
+	RepoTags []string `json:",omitempty"`
+	Created int `json:",omitempty"`
+	Size int `json:",omitempty"`
+	SharedSize int `json:",omitempty"`
+	VirtualSize int `json:",omitempty"`
+	Labels string `json:",omitempty"`
+	Containers int `json:",omitempty"`
+	ReadOnly bool `json:",omitempty"`
+	Dangling bool `json:",omitempty"`
+
+	// Podman extensions
+	Digest digest.Digest `json:",omitempty"`
+	ConfigDigest digest.Digest `json:",omitempty"`
+}
+
+func (i *ImageSummary) Id() string {
+	return i.ID
+}
+
+func (i *ImageSummary) IsReadOnly() bool {
+	return i.ReadOnly
+}
+
+func (i *ImageSummary) IsDangling() bool {
+	return i.Dangling
+}
+
+type ImageOptions struct {
+	All bool
+	Digests bool
+	Filter []string
+	Format string
+	Noheading bool
+	NoTrunc bool
+	Quiet bool
+	Sort string
+	History bool
+}
+
+type ImageDeleteOptions struct {
+	Force bool
+}
+
+// ImageDeleteReport is the response for removing an image from storage and
+// containers: what was untagged vs actually removed
+type ImageDeleteReport struct {
+	Untagged []string `json:"untagged"`
+	Deleted string `json:"deleted"`
+}
+
+type ImageHistoryOptions struct{}
+
+type ImageHistoryLayer struct {
+	ID string `json:"Id"`
+	Created int64 `json:"Created,omitempty"`
+	CreatedBy string `json:",omitempty"`
+	Tags []string `json:",omitempty"`
+	Size int64 `json:",omitempty"`
+	Comment string `json:",omitempty"`
+}
+
+type ImageHistoryReport struct {
+	Layers []ImageHistoryLayer
+}
+
+type ImageInspectOptions struct {
+	TypeObject string `json:",omitempty"`
+	Format string `json:",omitempty"`
+	Size bool `json:",omitempty"`
+	Latest bool `json:",omitempty"`
+}
+
+type ImageListOptions struct {
+	All bool `json:"all" schema:"all"`
+	Digests bool `json:"digests" schema:"digests"`
+	Filter []string `json:",omitempty"`
+	Filters url.Values `json:"filters" schema:"filters"`
+	Format string `json:",omitempty"`
+	History bool `json:",omitempty"`
+	Noheading bool `json:",omitempty"`
+	NoTrunc bool `json:",omitempty"`
+	Quiet bool `json:",omitempty"`
+	Sort string `json:",omitempty"`
+}
+
+type ImageListReport struct {
+	Images []ImageSummary
+}
+
+type ImagePruneOptions struct {
+	All bool
+	Filter ImageFilter
+}
+
+type ImagePruneReport struct {
+	Report Report
+	Size int64
+}
diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go
new file mode 100644
index 000000000..6f947dc4d
--- /dev/null
+++ b/pkg/domain/entities/types.go
@@ -0,0 +1,25 @@
+package entities
+
+type Container struct {
+	IdOrNamed
+}
+
+type Volume struct {
+	Identifier
+}
+
+type Report struct {
+	Id []string
+	Err map[string]error
+}
+
+type ContainerDeleteOptions struct{}
+type ContainerDeleteReport struct{ Report }
+type ContainerPruneReport struct{ Report }
+
+type PodDeleteReport struct{ Report }
+type PodPruneOptions struct{}
+type PodPruneReport struct{ Report }
+type VolumeDeleteOptions struct{}
+type VolumeDeleteReport struct{ Report }
+type VolumePruneReport struct{ Report }
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
new file mode 100644
index 000000000..2db08f259
--- /dev/null
+++ b/pkg/domain/infra/abi/images.go
@@ -0,0 +1,131 @@
+// +build ABISupport
+
+package abi
+
+import (
+	"context"
+
+	libpodImage "github.com/containers/libpod/libpod/image"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+	image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+	if err != nil {
+		return nil, err
+	}
+
+	results, err := ir.Libpod.RemoveImage(ctx, image, opts.Force)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageDeleteReport{}
+	if err := utils.DeepCopy(&report, results); err != nil {
+		return nil, err
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+	results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, []string{})
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImagePruneReport{}
+	report.Report.Id = append(report.Report.Id, results...)
+	return &report, nil
+}
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) {
+	var (
+		images []*libpodImage.Image
+		err    error
+	)
+
+	filters := utils.ToLibpodFilters(opts.Filters)
+	if len(filters) > 0 {
+		images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(filters)
+	} else {
+		images, err = ir.Libpod.ImageRuntime().GetImages()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageListReport{
+		Images: make([]entities.ImageSummary, len(images)),
+	}
+	for i, img := range images {
+		hold := entities.ImageSummary{}
+		if err := utils.DeepCopy(&hold, img); err != nil {
+			return nil, err
+		}
+		report.Images[i] = hold
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+	image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+	if err != nil {
+		return nil, err
+	}
+	results, err := image.History(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	history := entities.ImageHistoryReport{
+		Layers: make([]entities.ImageHistoryLayer, len(results)),
+	}
+
+	for i, layer := range results {
+		history.Layers[i] = ToDomainHistoryLayer(layer)
+	}
+	return &history, nil
+}
+
+func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer {
+	l := entities.ImageHistoryLayer{}
+	l.ID = layer.ID
+	l.Created = layer.Created.Unix()
+	l.CreatedBy = layer.CreatedBy
+	l.Tags = append(l.Tags, layer.Tags...)
+	l.Size = layer.Size
+	l.Comment = layer.Comment
+	return l
+}
+
+// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+// 	image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	results, err := r.libpod.RemoveImage(ctx, image, opts.Force)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	report := entities.ImageDeleteReport{}
+// 	if err := utils.DeepCopy(&report, results); err != nil {
+// 		return nil, err
+// 	}
+// 	return &report, nil
+// }
+//
+// func (r *imageRuntime) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+// 	// TODO: map FilterOptions
+// 	id, err := r.libpod.ImageEngine().PruneImages(ctx, opts.All, []string{})
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	// TODO:
Determine Size
+// 	report := entities.ImagePruneReport{}
+// 	copy(report.Report.Id, id)
+// 	return &report, nil
+// }
diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go
new file mode 100644
index 000000000..20ef1b150
--- /dev/null
+++ b/pkg/domain/infra/abi/images_test.go
@@ -0,0 +1,37 @@
+package abi
+
+//
+// import (
+// 	"context"
+// 	"testing"
+//
+// 	"github.com/stretchr/testify/mock"
+// )
+//
+// type MockImageRuntime struct {
+// 	mock.Mock
+// }
+//
+// func (m *MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error {
+// 	_ = m.Called(ctx, renderer, name)
+// 	return nil
+// }
+//
+// func TestImageSuccess(t *testing.T) {
+// 	actual := func() interface{} { return nil }
+//
+// 	m := new(MockImageRuntime)
+// 	m.On(
+// 		"Delete",
+// 		mock.AnythingOfType("*context.emptyCtx"),
+// 		mock.AnythingOfType("func() interface {}"),
+// 		"fedora").
+// 		Return(nil)
+//
+// 	r := DirectImageRuntime{m}
+// 	err := r.Delete(context.TODO(), actual, "fedora")
+// 	if err != nil {
+// 		t.Errorf("error should be nil, got: %v", err)
+// 	}
+// 	m.AssertExpectations(t)
+// }
diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go
new file mode 100644
index 000000000..479a69586
--- /dev/null
+++ b/pkg/domain/infra/abi/runtime.go
@@ -0,0 +1,19 @@
+// +build ABISupport
+
+package abi
+
+import (
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+// Image-related runtime linked against libpod library
+type ImageEngine struct {
+	Libpod *libpod.Runtime
+}
+
+// Container-related runtime linked against libpod library
+type ContainerEngine struct {
+	entities.ContainerEngine
+	Libpod *libpod.Runtime
+}
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
new file mode 100644
index 000000000..de996f567
--- /dev/null
+++ b/pkg/domain/infra/runtime_abi.go
@@ -0,0 +1,39 @@
+// +build ABISupport
+
+package infra
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/libpod/pkg/bindings"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/infra/abi"
+	"github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+// NewContainerEngine factory provides a libpod runtime for container-related operations
+func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		r, err := NewLibpodRuntime(opts.FlagSet, opts.Flags)
+		return &abi.ContainerEngine{ContainerEngine: r}, err
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		r, err := NewLibpodImageRuntime(opts.FlagSet, opts.Flags)
+		return r, err
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", mode) +} diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go new file mode 100644 index 000000000..480e7c8d7 --- /dev/null +++ b/pkg/domain/infra/runtime_image_proxy.go @@ -0,0 +1,25 @@ +// +build ABISupport + +package infra + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra/abi" + "github.com/spf13/pflag" +) + +// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo + +func NewLibpodImageRuntime(flags pflag.FlagSet, opts entities.EngineFlags) (entities.ImageEngine, error) { + r, err := GetRuntime(context.Background(), flags, opts) + if err != nil { + return nil, err + } + return &abi.ImageEngine{Libpod: r}, nil +} + +func (ir *runtime) ShutdownImageRuntime(force bool) error { + return ir.Libpod.Shutdown(force) +} diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go new file mode 100644 index 000000000..b42c52b6e --- /dev/null +++ b/pkg/domain/infra/runtime_libpod.go @@ -0,0 +1,331 @@ +package infra + +import ( + "context" + "fmt" + "os" + + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/cgroups" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/namespaces" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + flag "github.com/spf13/pflag" +) + +type engineOpts struct { + name string + renumber bool + migrate bool + noStore bool + withFDS bool + flags entities.EngineFlags +} + +// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers +func GetRuntimeMigrate(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags, newRuntime string) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + name: newRuntime, + renumber: false, + migrate: true, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify +func GetRuntimeDisableFDs(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: false, + migrate: false, + noStore: false, + withFDS: false, + flags: ef, + }) +} + +// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber +func GetRuntimeRenumber(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: true, + migrate: false, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntime generates a new libpod runtime configured by command line options +func GetRuntime(ctx context.Context, flags flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, flags, &engineOpts{ + renumber: false, + migrate: false, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntimeNoStore generates a new libpod runtime configured by command line options +func GetRuntimeNoStore(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: false, + migrate: false, + noStore: true, + withFDS: true, + flags: ef, + }) +} + +func getRuntime(ctx context.Context, fs flag.FlagSet, opts 
*engineOpts) (*libpod.Runtime, error) { + options := []libpod.RuntimeOption{} + storageOpts := storage.StoreOptions{} + storageSet := false + + uidmapFlag := fs.Lookup("uidmap") + gidmapFlag := fs.Lookup("gidmap") + subuidname := fs.Lookup("subuidname") + subgidname := fs.Lookup("subgidname") + if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) && + (uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) { + userns, _ := fs.GetString("userns") + uidmapVal, _ := fs.GetStringSlice("uidmap") + gidmapVal, _ := fs.GetStringSlice("gidmap") + subuidVal, _ := fs.GetString("subuidname") + subgidVal, _ := fs.GetString("subgidname") + mappings, err := ParseIDMapping(namespaces.UsernsMode(userns), uidmapVal, gidmapVal, subuidVal, subgidVal) + if err != nil { + return nil, err + } + storageOpts.UIDMap = mappings.UIDMap + storageOpts.GIDMap = mappings.GIDMap + + storageSet = true + } + + if fs.Changed("root") { + storageSet = true + storageOpts.GraphRoot = opts.flags.Root + } + if fs.Changed("runroot") { + storageSet = true + storageOpts.RunRoot = opts.flags.Runroot + } + if len(storageOpts.RunRoot) > 50 { + return nil, errors.New("the specified runroot is longer than 50 characters") + } + if fs.Changed("storage-driver") { + storageSet = true + storageOpts.GraphDriverName = opts.flags.StorageDriver + // Overriding the default storage driver caused GraphDriverOptions from storage.conf to be ignored + storageOpts.GraphDriverOptions = []string{} + } + // This should always be checked after storage-driver is checked + if len(opts.flags.StorageOpts) > 0 { + storageSet = true + storageOpts.GraphDriverOptions = opts.flags.StorageOpts + } + if opts.migrate { + options = append(options, libpod.WithMigrate()) + if opts.name != "" { + options = append(options, libpod.WithMigrateRuntime(opts.name)) + } + } + + if opts.renumber { + options = append(options, libpod.WithRenumber()) + } + + // Only set this if the user changes storage config on the command line + if storageSet { + options = append(options, libpod.WithStorageConfig(storageOpts)) + } + + if !storageSet && opts.noStore { + options = append(options, libpod.WithNoStore()) + } + // TODO CLI flags for image config? + // TODO CLI flag for signature policy? + + if len(opts.flags.Namespace) > 0 { + options = append(options, libpod.WithNamespace(opts.flags.Namespace)) + } + + if fs.Changed("runtime") { + options = append(options, libpod.WithOCIRuntime(opts.flags.Runtime)) + } + + if fs.Changed("conmon") { + options = append(options, libpod.WithConmonPath(opts.flags.ConmonPath)) + } + if fs.Changed("tmpdir") { + options = append(options, libpod.WithTmpDir(opts.flags.TmpDir)) + } + if fs.Changed("network-cmd-path") { + options = append(options, libpod.WithNetworkCmdPath(opts.flags.NetworkCmdPath)) + } + + if fs.Changed("events-backend") { + options = append(options, libpod.WithEventsLogger(opts.flags.EventsBackend)) + } + + if fs.Changed("cgroup-manager") { + options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager)) + } else { + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + if rootless.IsRootless() && !unified { + options = append(options, libpod.WithCgroupManager("cgroupfs")) + } + } + + // TODO flag to set libpod static dir? + // TODO flag to set libpod tmp dir? 
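	// Editorial aside: every override in this function is gated on
	// fs.Changed(name), pflag's test for "did the user explicitly set
	// this flag?", so unset flags never clobber config-file defaults.
	// A minimal illustration of that pattern (hypothetical flag set):
	//
	//	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	//	runtime := fs.String("runtime", "runc", "OCI runtime")
	//	_ = fs.Parse([]string{"--runtime", "crun"})
	//	if fs.Changed("runtime") {
	//		fmt.Println("user override:", *runtime) // prints: crun
	//	}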
+	if fs.Changed("cgroup-manager") {
+		options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager))
+	} else {
+		unified, err := cgroups.IsCgroup2UnifiedMode()
+		if err != nil {
+			return nil, err
+		}
+		if rootless.IsRootless() && !unified {
+			options = append(options, libpod.WithCgroupManager("cgroupfs"))
+		}
+	}
+
+	// TODO flag to set libpod static dir?
+	// TODO flag to set libpod tmp dir?
+
+	if fs.Changed("cni-config-dir") {
+		options = append(options, libpod.WithCNIConfigDir(opts.flags.CniConfigDir))
+	}
+	if fs.Changed("default-mounts-file") {
+		options = append(options, libpod.WithDefaultMountsFile(opts.flags.DefaultMountsFile))
+	}
+	if fs.Changed("hooks-dir") {
+		options = append(options, libpod.WithHooksDir(opts.flags.HooksDir...))
+	}
+
+	// TODO flag to set CNI plugins dir?
+
+	// TODO I don't think these belong here?
+	// Will follow up with a different PR to address
+	//
+	// Pod create options
+
+	infraImageFlag := fs.Lookup("infra-image")
+	if infraImageFlag != nil && infraImageFlag.Changed {
+		infraImage, _ := fs.GetString("infra-image")
+		options = append(options, libpod.WithDefaultInfraImage(infraImage))
+	}
+
+	infraCommandFlag := fs.Lookup("infra-command")
+	if infraCommandFlag != nil && infraCommandFlag.Changed {
+		infraCommand, _ := fs.GetString("infra-command")
+		options = append(options, libpod.WithDefaultInfraCommand(infraCommand))
+	}
+
+	if !opts.withFDS {
+		options = append(options, libpod.WithEnableSDNotify())
+	}
+	if fs.Changed("config") {
+		return libpod.NewRuntimeFromConfig(ctx, opts.flags.Config, options...)
+	}
+	return libpod.NewRuntime(ctx, options...)
+}
+
+// ParseIDMapping takes ID mappings and subuid/subgid maps and returns a storage mapping
+func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
+	options := storage.IDMappingOptions{
+		HostUIDMapping: true,
+		HostGIDMapping: true,
+	}
+
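+	// keep-id maps the invoking user's UID/GID to the same IDs inside the
+	// container. For example (assuming rootless UID 1000 with 65536 configured
+	// subordinate IDs): container IDs 0-999 come from the subordinate range,
+	// container ID 1000 maps to the user's own UID, and IDs from 1001 onward
+	// continue in the subordinate range.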
+	if mode.IsKeepID() {
+		if len(uidMapSlice) > 0 || len(gidMapSlice) > 0 {
+			return nil, errors.New("cannot specify custom mappings with --userns=keep-id")
+		}
+		if len(subUIDMap) > 0 || len(subGIDMap) > 0 {
+			return nil, errors.New("cannot specify subuidmap or subgidmap with --userns=keep-id")
+		}
+		if rootless.IsRootless() {
+			min := func(a, b int) int {
+				if a < b {
+					return a
+				}
+				return b
+			}
+
+			uid := rootless.GetRootlessUID()
+			gid := rootless.GetRootlessGID()
+
+			uids, gids, err := rootless.GetConfiguredMappings()
+			if err != nil {
+				return nil, errors.Wrapf(err, "cannot read mappings")
+			}
+			maxUID, maxGID := 0, 0
+			for _, u := range uids {
+				maxUID += u.Size
+			}
+			for _, g := range gids {
+				maxGID += g.Size
+			}
+
+			options.UIDMap, options.GIDMap = nil, nil
+
+			options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
+			options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
+			if maxUID > uid {
+				options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
+			}
+
+			options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
+			options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
+			if maxGID > gid {
+				options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
+			}
+
+			options.HostUIDMapping = false
+			options.HostGIDMapping = false
+		}
+		// Simply ignore the setting and do not set up an inner namespace for root as it is a no-op
+		return &options, nil
+	}
+
+	if subGIDMap == "" && subUIDMap != "" {
+		subGIDMap = subUIDMap
+	}
+	if subUIDMap == "" && subGIDMap != "" {
+		subUIDMap = subGIDMap
+	}
+	if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
+		gidMapSlice = uidMapSlice
+	}
+	if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
+		uidMapSlice = gidMapSlice
+	}
+	if len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+		uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+	}
+	if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+		gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+	}
+
+	if subUIDMap != "" && subGIDMap != "" {
+		mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = mappings.UIDs()
+		options.GIDMap = mappings.GIDs()
+	}
+	parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
+	if err != nil {
+		return nil, err
+	}
+	parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
+	if err != nil {
+		return nil, err
+	}
+	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+	if len(options.UIDMap) > 0 {
+		options.HostUIDMapping = false
+	}
+	if len(options.GIDMap) > 0 {
+		options.HostGIDMapping = false
+	}
+	return &options, nil
+}
diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go
new file mode 100644
index 000000000..d17b8efa1
--- /dev/null
+++ b/pkg/domain/infra/runtime_proxy.go
@@ -0,0 +1,54 @@
+// +build ABISupport
+
+package infra
+
+import (
+	"context"
+
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/domain/entities"
+	flag "github.com/spf13/pflag"
+)
+
+// ContainerEngine Proxy will be EOL'd once podmanV2 is separated from the libpod repo
+
+type runtime struct {
+	entities.ContainerEngine
+	Libpod *libpod.Runtime
+}
+
+func NewLibpodRuntime(flags flag.FlagSet, opts entities.EngineFlags) (entities.ContainerEngine, error) {
+	r, err := GetRuntime(context.Background(), flags, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &runtime{Libpod: r}, nil
+}
+
+func (r *runtime) ShutdownRuntime(force bool) error {
+	return r.Libpod.Shutdown(force)
+}
+
+func (r *runtime) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) {
+	panic("implement me")
+}
diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go
new file mode 100644
index 000000000..8a606deaf
--- /dev/null
+++ b/pkg/domain/infra/runtime_tunnel.go
@@ -0,0 +1,35 @@
+// +build !ABISupport
+
+package infra
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/libpod/pkg/bindings"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		return nil, fmt.Errorf("direct runtime not supported")
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
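+		// ctx carries the client connection to the Podman service; every
+		// tunnel engine method hands it back to the bindings layer.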
+		return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		return nil, fmt.Errorf("direct image runtime not supported")
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ImageEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
new file mode 100644
index 000000000..718685e57
--- /dev/null
+++ b/pkg/domain/infra/tunnel/images.go
@@ -0,0 +1,81 @@
+package tunnel
+
+import (
+	"context"
+	"net/url"
+
+	images "github.com/containers/libpod/pkg/bindings/images"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+	results, err := images.Remove(ir.ClientCxt, nameOrId, &opts.Force)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageDeleteReport{
+		Untagged: nil,
+		Deleted:  "",
+	}
+
+	for _, e := range results {
+		if a, ok := e["Deleted"]; ok {
+			report.Deleted = a
+		}
+
+		if a, ok := e["Untagged"]; ok {
+			report.Untagged = append(report.Untagged, a)
+		}
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) {
+	// use a name that does not shadow the imported bindings package "images"
+	imageSummaries, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageListReport{
+		Images: make([]entities.ImageSummary, len(imageSummaries)),
+	}
+	for i, img := range imageSummaries {
+		hold := entities.ImageSummary{}
+		if err := utils.DeepCopy(&hold, img); err != nil {
+			return nil, err
+		}
+		report.Images[i] = hold
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+	results, err := images.History(ir.ClientCxt, nameOrId)
+	if err != nil {
+		return nil, err
+	}
+
+	history := entities.ImageHistoryReport{
+		Layers: make([]entities.ImageHistoryLayer, len(results)),
+	}
+
+	for i, layer := range results {
+		hold := entities.ImageHistoryLayer{}
+		_ = utils.DeepCopy(&hold, layer)
+		history.Layers[i] = hold
+	}
+	return &history, nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+	results, err := images.Prune(ir.ClientCxt, url.Values{})
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImagePruneReport{}
+	// copy() into the empty report slice was a no-op; append the results instead
+	report.Report.Id = append(report.Report.Id, results...)
+	return &report, nil
+}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
new file mode 100644
index 000000000..af433a6d9
--- /dev/null
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -0,0 +1,45 @@
+package tunnel
+
+import (
+	"context"
+
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+// Image-related runtime using an ssh-tunnel to utilize Podman service
+type ImageEngine struct {
+	ClientCxt context.Context
+}
+
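+// A hypothetical caller obtains these engines via the NewImageEngine /
+// NewContainerEngine factories and passes the same connection-backed context
+// to every call, e.g. (illustrative names only):
+//
+//	ie, _ := infra.NewImageEngine(entities.TunnelMode, opts)
+//	report, _ := ie.List(context.Background(), entities.ImageListOptions{All: true})
+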
+// Container-related runtime using an ssh-tunnel to utilize Podman service
+type ContainerEngine struct {
+	ClientCxt context.Context
+}
+
+func (r *ContainerEngine) Shutdown(force bool) error {
+	return nil
+}
+
+func (r *ContainerEngine) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) {
+	panic("implement me")
+}
diff --git a/pkg/domain/utils/utils.go b/pkg/domain/utils/utils.go
new file mode 100644
index 000000000..c17769f62
--- /dev/null
+++ b/pkg/domain/utils/utils.go
@@ -0,0 +1,41 @@
+package utils
+
+import (
+	"net/url"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+)
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+// DeepCopy does a deep copy of a structure
+// Error checking of parameters delegated to json engine
+var DeepCopy = func(dst interface{}, src interface{}) error {
+	payload, err := json.Marshal(src)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(payload, dst)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func ToLibpodFilters(f url.Values) (filters []string) {
+	for k, v := range f {
+		filters = append(filters, k+"="+v[0])
+	}
+	return
+}
+
+func ToUrlValues(f []string) (filters url.Values) {
+	filters = make(url.Values)
+	for _, v := range f {
+		t := strings.SplitN(v, "=", 2)
+		if len(t) < 2 {
+			// skip malformed "key=value" entries rather than panicking on t[1]
+			continue
+		}
+		filters.Add(t[0], t[1])
+	}
+	return
+}
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 94726bbbd..55427771c 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -846,11 +846,6 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO
 		workDir = *opts.Workdir
 	}
 
-	var detachKeys string
-	if opts.DetachKeys != nil {
-		detachKeys = *opts.DetachKeys
-	}
-
 	resizeChan := make(chan remotecommand.TerminalSize)
 
 	reader, writer, _, pipeWriter, streams := setupStreams(call)
@@ -870,8 +865,17 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO
 		}
 	}()
 
+	execConfig := new(libpod.ExecConfig)
+	execConfig.Command = opts.Cmd
+	execConfig.Terminal = opts.Tty
+	execConfig.Privileged = opts.Privileged
+	execConfig.Environment = envs
+	execConfig.User = user
+	execConfig.WorkDir = workDir
+	execConfig.DetachKeys = opts.DetachKeys
+
 	go func() {
-		ec, err := ctr.Exec(opts.Tty, opts.Privileged, envs, opts.Cmd, user, workDir, streams, 0, resizeChan, detachKeys)
+		ec, err := ctr.Exec(execConfig, streams, resizeChan)
 		if err != nil {
 			logrus.Errorf(err.Error())
 		}
diff --git a/version/version.go b/version/version.go
index 5a7b4a36e..0d0b0b422 100644
--- a/version/version.go
+++ b/version/version.go
@@ -4,7 +4,7 @@ package version
 // NOTE: remember to bump the version at the top
 // of the top-level README.md file when this is
 // bumped.
-const Version = "1.8.2-dev"
+const Version = "1.8.3-dev"
 
 // RemoteAPIVersion is the version for the remote
 // client API. It is used to determine compatibility