114 files changed, 5950 insertions, 764 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 5ec35cccb..c44277b05 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -137,7 +137,7 @@ gating_task: - 'cd $GOSRC && ./hack/podman-commands.sh |& ${TIMESTAMP}' # N/B: need 'clean' so some committed files are re-generated. - '/usr/local/bin/entrypoint.sh clean podman-remote |& ${TIMESTAMP}' - - '/usr/local/bin/entrypoint.sh clean podman BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}' + - '/usr/local/bin/entrypoint.sh clean podman xref_helpmsgs_manpages BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh local-cross |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh podman-remote-darwin |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh podman-remote-windows |& ${TIMESTAMP}' @@ -383,6 +383,10 @@ docdir: .PHONY: docs docs: .install.md2man docdir $(MANPAGES) ## Generate documentation +.PHONE: xref_helpmsgs_manpages +xref_helpmsgs_manpages: + ./hack/xref-helpmsgs-manpages + install-podman-remote-%-docs: podman-remote docs $(MANPAGES) rm -rf docs/build/remote mkdir -p docs/build/remote @@ -430,7 +434,7 @@ podman-remote-release-%.zip: cp release.txt "$(TMPDIR)/" cp ./bin/podman-remote-$*$(BINSFX) "$(TMPDIR)/$(SUBDIR)/podman$(BINSFX)" cp -r ./docs/build/remote/$* "$(TMPDIR)/$(SUBDIR)/docs/" - cd "$(TMPDIR)" && \ + cd "$(TMPDIR)/$(SUBDIR)" && \ zip --recurse-paths "$(CURDIR)/$@" "./release.txt" "./" -rm -rf "$(TMPDIR)" diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index c9c676ccb..6578f40fd 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -2,19 +2,26 @@ ## 1.8.2 +### Features +- Initial support for automatically updating containers managed via Systemd unit files has been merged. This allows containers to automatically upgrade if a newer version of their image becomes available + ### Bugfixes -- Fixed a bug where unit files generates by `podman generate systemd --new` would not force containers to detach, causing the unit to time out when trying to start +- Fixed a bug where unit files generated by `podman generate systemd --new` would not force containers to detach, causing the unit to time out when trying to start - Fixed a bug where `podman system reset` could delete important system directories if run as rootless on installations created by older Podman ([#4831](https://github.com/containers/libpod/issues/4831)) - Fixed a bug where image built by `podman build` would not properly set the OS and Architecture they were built with ([#5503](https://github.com/containers/libpod/issues/5503)) - Fixed a bug where attached `podman run` with `--sig-proxy` enabled (the default), when built with Go 1.14, would repeatedly send signal 23 to the process in the container and could generate errors when the container stopped ([#5483](https://github.com/containers/libpod/issues/5483)) - Fixed a bug where rootless `podman run` commands could hang when forwarding ports +- Fixed a bug where rootless Podman would not work when `/proc` was mounted with the `hidepid` option set +- Fixed a bug where the `podman system service` command would use large amounts of CPU when `--timeout` was set to 0 ([#5531](https://github.com/containers/libpod/issues/5531)) ### HTTP API - Initial support for Libpod endpoints related to creating and operating on image manifest lists has been added - The Libpod Healthcheck and Events API endpoints are now supported +- The Swagger endpoint can now handle cases where no Swagger documentation has been generated ### Misc -- Updated vendored containers/storage to 
v1.16.5 +- Updated Buildah to v1.14.3 +- Updated containers/storage to v1.16.5 - Several performance improvements have been made to creating containers, which should somewhat improve the performance of `podman create` and `podman run` ## 1.8.1 diff --git a/changelog.txt b/changelog.txt index e1f65fe9b..1e08a8419 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,21 @@ +- Changelog for v1.8.2 (2020-03-19) + * fix reported compat issues + * Don't include SUBDIR in windows.zip + * rootless: fix usage with hidepid=1 + * V2 podman command + * serve swagger when present + * swagger: more consistency fixes + * Vendor in containers/buildah v1.14.3 + * Reduce CPU usage when --timeout=0 + * New test: man page cross-ref against --help + * podman: avoid conmon zombie on exec + * Filter pods through pod list api + * Bump to v1.8.2-dev + * Fix vendoring on master + * fix timeout file flake + * auto updates + * pkg/systemd: add dbus support + - Changelog for v1.8.2-rc1 (2020-03-17) * Update release notes for v1.8.2-rc1 * Fix vendoring on master diff --git a/cmd/podman/pod_ps.go b/cmd/podman/pod_ps.go index d7731e983..7acbd6888 100644 --- a/cmd/podman/pod_ps.go +++ b/cmd/podman/pod_ps.go @@ -4,7 +4,6 @@ import ( "fmt" "reflect" "sort" - "strconv" "strings" "time" @@ -13,7 +12,6 @@ import ( "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/adapter" - "github.com/containers/libpod/pkg/util" "github.com/docker/go-units" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -29,8 +27,6 @@ const ( NUM_CTR_INFO = 10 ) -type PodFilter func(*adapter.Pod) bool - var ( bc_opts shared.PsOptions ) @@ -174,29 +170,23 @@ func podPsCmd(c *cliconfig.PodPsValues) error { opts.Format = genPodPsFormat(c) - var filterFuncs []PodFilter - if c.Filter != "" { - filters := strings.Split(c.Filter, ",") - for _, f := range filters { - filterSplit := strings.Split(f, "=") - if len(filterSplit) < 2 { - return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) - } - generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1]) - if err != nil { - return errors.Wrapf(err, "invalid filter") - } - filterFuncs = append(filterFuncs, generatedFunc) - } - } - var pods []*adapter.Pod + + // If latest is set true filters are ignored. 
if c.Latest { pod, err := runtime.GetLatestPod() if err != nil { return err } pods = append(pods, pod) + return generatePodPsOutput(pods, opts) + } + + if c.Filter != "" { + pods, err = runtime.GetPodsWithFilters(c.Filter) + if err != nil { + return err + } } else { pods, err = runtime.GetAllPods() if err != nil { @@ -204,19 +194,7 @@ func podPsCmd(c *cliconfig.PodPsValues) error { } } - podsFiltered := make([]*adapter.Pod, 0, len(pods)) - for _, pod := range pods { - include := true - for _, filter := range filterFuncs { - include = include && filter(pod) - } - - if include { - podsFiltered = append(podsFiltered, pod) - } - } - - return generatePodPsOutput(podsFiltered, opts) + return generatePodPsOutput(pods, opts) } // podPsCheckFlagsPassed checks if mutually exclusive flags are passed together @@ -235,88 +213,6 @@ func podPsCheckFlagsPassed(c *cliconfig.PodPsValues) error { return nil } -func generatePodFilterFuncs(filter, filterValue string) (func(pod *adapter.Pod) bool, error) { - switch filter { - case "ctr-ids": - return func(p *adapter.Pod) bool { - ctrIds, err := p.AllContainersByID() - if err != nil { - return false - } - return util.StringInSlice(filterValue, ctrIds) - }, nil - case "ctr-names": - return func(p *adapter.Pod) bool { - ctrs, err := p.AllContainers() - if err != nil { - return false - } - for _, ctr := range ctrs { - if filterValue == ctr.Name() { - return true - } - } - return false - }, nil - case "ctr-number": - return func(p *adapter.Pod) bool { - ctrIds, err := p.AllContainersByID() - if err != nil { - return false - } - - fVint, err2 := strconv.Atoi(filterValue) - if err2 != nil { - return false - } - return len(ctrIds) == fVint - }, nil - case "ctr-status": - if !util.StringInSlice(filterValue, []string{"created", "restarting", "running", "paused", "exited", "unknown"}) { - return nil, errors.Errorf("%s is not a valid status", filterValue) - } - return func(p *adapter.Pod) bool { - ctr_statuses, err := p.Status() - if err != nil { - return false - } - for _, ctr_status := range ctr_statuses { - state := ctr_status.String() - if ctr_status == define.ContainerStateConfigured { - state = "created" - } - if state == filterValue { - return true - } - } - return false - }, nil - case "id": - return func(p *adapter.Pod) bool { - return strings.Contains(p.ID(), filterValue) - }, nil - case "name": - return func(p *adapter.Pod) bool { - return strings.Contains(p.Name(), filterValue) - }, nil - case "status": - if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) { - return nil, errors.Errorf("%s is not a valid pod status", filterValue) - } - return func(p *adapter.Pod) bool { - status, err := p.GetPodStatus() - if err != nil { - return false - } - if strings.ToLower(status) == filterValue { - return true - } - return false - }, nil - } - return nil, errors.Errorf("%s is an invalid filter", filter) -} - // generate the template based on conditions given func genPodPsFormat(c *cliconfig.PodPsValues) string { format := "" diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go index 7b0b497fc..3046953b5 100644 --- a/cmd/podman/shared/pod.go +++ b/cmd/podman/shared/pod.go @@ -2,9 +2,11 @@ package shared import ( "strconv" + "strings" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/util" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/docker/go-connections/nat" "github.com/pkg/errors" @@ -134,4 +136,128 @@ func CreatePortBindings(ports 
[]string) ([]ocicni.PortMapping, error) { return portBindings, nil } +// GetPodsWithFilters uses the cliconfig to categorize if the latest pod is required. +func GetPodsWithFilters(r *libpod.Runtime, filters string) ([]*libpod.Pod, error) { + filterFuncs, err := GenerateFilterFunction(r, strings.Split(filters, ",")) + if err != nil { + return nil, err + } + return FilterAllPodsWithFilterFunc(r, filterFuncs...) +} + +// FilterAllPodsWithFilterFunc retrieves all pods +// Filters can be provided which will determine which pods are included in the +// output. Multiple filters are handled by ANDing their output, so only pods +// matching all filters are returned +func FilterAllPodsWithFilterFunc(r *libpod.Runtime, filters ...libpod.PodFilter) ([]*libpod.Pod, error) { + pods, err := r.Pods(filters...) + if err != nil { + return nil, err + } + return pods, nil +} + +// GenerateFilterFunction basically gets the filters based on the input by the user +// and filter the pod list based on the criteria. +func GenerateFilterFunction(r *libpod.Runtime, filters []string) ([]libpod.PodFilter, error) { + var filterFuncs []libpod.PodFilter + for _, f := range filters { + filterSplit := strings.Split(f, "=") + if len(filterSplit) < 2 { + return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + generatedFunc, err := generatePodFilterFuncs(filterSplit[0], filterSplit[1]) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter") + } + filterFuncs = append(filterFuncs, generatedFunc) + } + + return filterFuncs, nil +} +func generatePodFilterFuncs(filter, filterValue string) ( + func(pod *libpod.Pod) bool, error) { + switch filter { + case "ctr-ids": + return func(p *libpod.Pod) bool { + ctrIds, err := p.AllContainersByID() + if err != nil { + return false + } + return util.StringInSlice(filterValue, ctrIds) + }, nil + case "ctr-names": + return func(p *libpod.Pod) bool { + ctrs, err := p.AllContainers() + if err != nil { + return false + } + for _, ctr := range ctrs { + if filterValue == ctr.Name() { + return true + } + } + return false + }, nil + case "ctr-number": + return func(p *libpod.Pod) bool { + ctrIds, err := p.AllContainersByID() + if err != nil { + return false + } + + fVint, err2 := strconv.Atoi(filterValue) + if err2 != nil { + return false + } + return len(ctrIds) == fVint + }, nil + case "ctr-status": + if !util.StringInSlice(filterValue, + []string{"created", "restarting", "running", "paused", + "exited", "unknown"}) { + return nil, errors.Errorf("%s is not a valid status", filterValue) + } + return func(p *libpod.Pod) bool { + ctr_statuses, err := p.Status() + if err != nil { + return false + } + for _, ctr_status := range ctr_statuses { + state := ctr_status.String() + if ctr_status == define.ContainerStateConfigured { + state = "created" + } + if state == filterValue { + return true + } + } + return false + }, nil + case "id": + return func(p *libpod.Pod) bool { + return strings.Contains(p.ID(), filterValue) + }, nil + case "name": + return func(p *libpod.Pod) bool { + return strings.Contains(p.Name(), filterValue) + }, nil + case "status": + if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) { + return nil, errors.Errorf("%s is not a valid pod status", filterValue) + } + return func(p *libpod.Pod) bool { + status, err := p.GetPodStatus() + if err != nil { + return false + } + if strings.ToLower(status) == filterValue { + return true + } + return false + }, nil + } + return nil, 
errors.Errorf("%s is an invalid filter", filter) +} + var DefaultKernelNamespaces = "cgroup,ipc,net,uts" diff --git a/cmd/podmanV2/README.md b/cmd/podmanV2/README.md new file mode 100644 index 000000000..a17e6f850 --- /dev/null +++ b/cmd/podmanV2/README.md @@ -0,0 +1,113 @@ +# Adding a podman V2 commands + +## Build podman V2 + +```shell script +$ cd $GOPATH/src/github.com/containers/libpod/cmd/podmanV2 +``` +If you wish to include the libpod library in your program, +```shell script +$ go build -tags 'ABISupport' . +``` +The `--remote` flag may be used to connect to the Podman service using the API. +Otherwise, direct calls will be made to the Libpod library. +```shell script +$ go build -tags '!ABISupport' . +``` +The Libpod library is not linked into the executable. +All calls are made via the API and `--remote=False` is an error condition. + +## Adding a new command `podman manifests` +```shell script +$ mkdir -p $GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests +``` +Create the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests/manifest.go``` +```go +package manifests + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _manifests_ + manifestCmd = &cobra.Command{ + Use: "manifest", + Short: "Manage manifests", + Long: "Manage manifests", + Example: "podman manifests IMAGE", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, // Report error if there is no sub command given + } +) +func init() { + // Subscribe command to podman + registry.Commands = append(registry.Commands, registry.CliCommand{ + // _podman manifest_ will support both ABIMode and TunnelMode + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + // The definition for this command + Command: manifestCmd, + }) + // Setup cobra templates, sub commands will inherit + manifestCmd.SetHelpTemplate(registry.HelpTemplate()) + manifestCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +// preRunE populates the image engine for sub commands +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewImageEngine(cmd, args) + return err +} +``` +To "wire" in the `manifest` command, edit the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/main.go``` to add: +```go +package main + +import _ "github.com/containers/libpod/cmd/podmanV2/manifests" +``` + +## Adding a new sub command `podman manifests list` +Create the file ```$GOPATH/src/github.com/containers/libpod/cmd/podmanV2/manifests/inspect.go``` +```go +package manifests + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman manifests _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect IMAGE", + Short: "Display manifest from image", + Long: "Displays the low-level information on a manifest identified by image name or ID", + RunE: inspect, + Example: "podman manifest DEADBEEF", + } +) + +func init() { + // Subscribe inspect sub command to manifest command + registry.Commands = append(registry.Commands, registry.CliCommand{ + // _podman manifest inspect_ will support both ABIMode and TunnelMode + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + // The definition for this command + Command: inspectCmd, + Parent: manifestCmd, + }) + + // This is where you would configure the cobra flags using 
inspectCmd.Flags() +} + +// Business logic: cmd is inspectCmd, args is the positional arguments from os.Args +func inspect(cmd *cobra.Command, args []string) error { + // Business logic using registry.ImageEngine + // Do not pull from libpod directly use the domain objects and types + return nil +} +``` diff --git a/cmd/podmanV2/containers/container.go b/cmd/podmanV2/containers/container.go new file mode 100644 index 000000000..6b44f2a3e --- /dev/null +++ b/cmd/podmanV2/containers/container.go @@ -0,0 +1,33 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _container_ + containerCmd = &cobra.Command{ + Use: "container", + Short: "Manage containers", + Long: "Manage containers", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: containerCmd, + }) + containerCmd.SetHelpTemplate(registry.HelpTemplate()) + containerCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/containers/inspect.go b/cmd/podmanV2/containers/inspect.go new file mode 100644 index 000000000..635be4789 --- /dev/null +++ b/cmd/podmanV2/containers/inspect.go @@ -0,0 +1,42 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman container _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect [flags] CONTAINER", + Short: "Display the configuration of a container", + Long: `Displays the low-level information on a container identified by name or ID.`, + PreRunE: inspectPreRunE, + RunE: inspect, + Example: `podman container inspect myCtr + podman container inspect -l --format '{{.Id}} {{.Config.Labels}}'`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: inspectCmd, + Parent: containerCmd, + }) +} + +func inspectPreRunE(cmd *cobra.Command, args []string) (err error) { + err = preRunE(cmd, args) + if err != nil { + return + } + + _, err = registry.NewImageEngine(cmd, args) + return err +} + +func inspect(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/containers/list.go b/cmd/podmanV2/containers/list.go new file mode 100644 index 000000000..630d9bbc7 --- /dev/null +++ b/cmd/podmanV2/containers/list.go @@ -0,0 +1,34 @@ +package containers + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman container _list_ + listCmd = &cobra.Command{ + Use: "list", + Aliases: []string{"ls"}, + Args: cobra.NoArgs, + Short: "List containers", + Long: "Prints out information about the containers", + RunE: containers, + Example: `podman container list -a + podman container list -a --format "{{.ID}} {{.Image}} {{.Labels}} {{.Mounts}}" + podman container list --size --sort names`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, 
entities.TunnelMode}, + Command: listCmd, + Parent: containerCmd, + }) +} + +func containers(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/containers/ps.go b/cmd/podmanV2/containers/ps.go new file mode 100644 index 000000000..ce3d66c51 --- /dev/null +++ b/cmd/podmanV2/containers/ps.go @@ -0,0 +1,29 @@ +package containers + +import ( + "strings" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _ps_ + psCmd = &cobra.Command{ + Use: "ps", + Args: cobra.NoArgs, + Short: listCmd.Short, + Long: listCmd.Long, + PersistentPreRunE: preRunE, + RunE: containers, + Example: strings.Replace(listCmd.Example, "container list", "ps", -1), + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: psCmd, + }) +} diff --git a/cmd/podmanV2/images/history.go b/cmd/podmanV2/images/history.go new file mode 100644 index 000000000..c75ae6ddc --- /dev/null +++ b/cmd/podmanV2/images/history.go @@ -0,0 +1,79 @@ +package images + +import ( + "context" + "fmt" + "os" + "text/tabwriter" + "text/template" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/cmd/podmanV2/report" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + long = `Displays the history of an image. + + The information can be printed out in an easy to read, or user specified format, and can be truncated.` + + // podman _history_ + historyCmd = &cobra.Command{ + Use: "history [flags] IMAGE", + Short: "Show history of a specified image", + Long: long, + Example: "podman history quay.io/fedora/fedora", + Args: cobra.ExactArgs(1), + PersistentPreRunE: preRunE, + RunE: history, + } +) + +var cmdFlags = struct { + Human bool + NoTrunc bool + Quiet bool + Format string +}{} + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: historyCmd, + }) + + historyCmd.SetHelpTemplate(registry.HelpTemplate()) + historyCmd.SetUsageTemplate(registry.UsageTemplate()) + flags := historyCmd.Flags() + flags.StringVar(&cmdFlags.Format, "format", "", "Change the output to JSON or a Go template") + flags.BoolVarP(&cmdFlags.Human, "human", "H", true, "Display sizes and dates in human readable format") + flags.BoolVar(&cmdFlags.NoTrunc, "no-trunc", false, "Do not truncate the output") + flags.BoolVar(&cmdFlags.NoTrunc, "notruncate", false, "Do not truncate the output") + flags.BoolVarP(&cmdFlags.Quiet, "quiet", "q", false, "Display the numeric IDs only") +} + +func history(cmd *cobra.Command, args []string) error { + results, err := registry.ImageEngine().History(context.Background(), args[0], entities.ImageHistoryOptions{}) + if err != nil { + return err + } + + row := "{{slice $x.ID 0 12}}\t{{toRFC3339 $x.Created}}\t{{ellipsis $x.CreatedBy 45}}\t{{$x.Size}}\t{{$x.Comment}}\n" + if cmdFlags.Human { + row = "{{slice $x.ID 0 12}}\t{{toHumanDuration $x.Created}}\t{{ellipsis $x.CreatedBy 45}}\t{{toHumanSize $x.Size}}\t{{$x.Comment}}\n" + } + format := "{{range $y, $x := . 
}}" + row + "{{end}}" + + tmpl := template.Must(template.New("report").Funcs(report.PodmanTemplateFuncs()).Parse(format)) + w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0) + + _, _ = w.Write(report.ReportHeader("id", "created", "created by", "size", "comment")) + err = tmpl.Execute(w, results.Layers) + if err != nil { + fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Failed to print report")) + } + w.Flush() + return nil +} diff --git a/cmd/podmanV2/images/image.go b/cmd/podmanV2/images/image.go new file mode 100644 index 000000000..a15c3e826 --- /dev/null +++ b/cmd/podmanV2/images/image.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _image_ + imageCmd = &cobra.Command{ + Use: "image", + Short: "Manage images", + Long: "Manage images", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: imageCmd, + }) + imageCmd.SetHelpTemplate(registry.HelpTemplate()) + imageCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewImageEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/images/images.go b/cmd/podmanV2/images/images.go new file mode 100644 index 000000000..a1e56396a --- /dev/null +++ b/cmd/podmanV2/images/images.go @@ -0,0 +1,46 @@ +package images + +import ( + "strings" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // podman _images_ + imagesCmd = &cobra.Command{ + Use: strings.Replace(listCmd.Use, "list", "images", 1), + Short: listCmd.Short, + Long: listCmd.Long, + PersistentPreRunE: preRunE, + RunE: images, + Example: strings.Replace(listCmd.Example, "podman image list", "podman images", -1), + } + + imagesOpts = entities.ImageListOptions{} +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: imagesCmd, + }) + imagesCmd.SetHelpTemplate(registry.HelpTemplate()) + imagesCmd.SetUsageTemplate(registry.UsageTemplate()) + + flags := imagesCmd.Flags() + flags.BoolVarP(&imagesOpts.All, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&imagesOpts.Digests, "digests", false, "Show digests") + flags.StringSliceVarP(&imagesOpts.Filter, "filter", "f", []string{}, "Filter output based on conditions provided (default [])") + flags.StringVar(&imagesOpts.Format, "format", "", "Change the output format to JSON or a Go template") + flags.BoolVarP(&imagesOpts.Noheading, "noheading", "n", false, "Do not print column headings") + // TODO Need to learn how to deal with second name being a string instead of a char. 
+ // This needs to be "no-trunc, notruncate" + flags.BoolVar(&imagesOpts.NoTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&imagesOpts.NoTrunc, "notruncate", false, "Do not truncate output") + flags.BoolVarP(&imagesOpts.Quiet, "quiet", "q", false, "Display only image IDs") + flags.StringVar(&imagesOpts.Sort, "sort", "created", "Sort by created, id, repository, size, or tag") + flags.BoolVarP(&imagesOpts.History, "history", "", false, "Display the image name history") +} diff --git a/cmd/podmanV2/images/inspect.go b/cmd/podmanV2/images/inspect.go new file mode 100644 index 000000000..9c44cea35 --- /dev/null +++ b/cmd/podmanV2/images/inspect.go @@ -0,0 +1,124 @@ +package images + +import ( + "strings" + + "github.com/containers/buildah/pkg/formats" + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/util" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + inspectOpts = entities.ImageInspectOptions{} + + // Command: podman image _inspect_ + inspectCmd = &cobra.Command{ + Use: "inspect [flags] IMAGE", + Short: "Display the configuration of an image", + Long: `Displays the low-level information on an image identified by name or ID.`, + PreRunE: populateEngines, + RunE: imageInspect, + Example: `podman image inspect alpine`, + } + + containerEngine entities.ContainerEngine +) + +// Inspect is unique in that it needs both an ImageEngine and a ContainerEngine +func populateEngines(cmd *cobra.Command, args []string) (err error) { + // Populate registry.ImageEngine + err = preRunE(cmd, args) + if err != nil { + return + } + + // Populate registry.ContainerEngine + containerEngine, err = registry.NewContainerEngine(cmd, args) + return +} + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: inspectCmd, + Parent: imageCmd, + }) + + flags := inspectCmd.Flags() + flags.BoolVarP(&inspectOpts.Latest, "latest", "l", false, "Act on the latest container podman is aware of") + flags.BoolVarP(&inspectOpts.Size, "size", "s", false, "Display total file size") + flags.StringVarP(&inspectOpts.Format, "format", "f", "", "Change the output format to a Go template") + + if registry.GlobalFlags.EngineMode == entities.ABIMode { + // TODO: This is the same as V1. We could skip creating the flag altogether in V2... + _ = flags.MarkHidden("latest") + } +} + +const ( + inspectTypeContainer = "container" + inspectTypeImage = "image" + inspectAll = "all" +) + +func imageInspect(cmd *cobra.Command, args []string) error { + inspectType := inspectTypeImage + latestContainer := inspectOpts.Latest + + if len(args) == 0 && !latestContainer { + return errors.Errorf("container or image name must be specified: podman inspect [options [...]] name") + } + + if len(args) > 0 && latestContainer { + return errors.Errorf("you cannot provide additional arguments with --latest") + } + + if !util.StringInSlice(inspectType, []string{inspectTypeContainer, inspectTypeImage, inspectAll}) { + return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll) + } + + outputFormat := inspectOpts.Format + if strings.Contains(outputFormat, "{{.Id}}") { + outputFormat = strings.Replace(outputFormat, "{{.Id}}", formats.IDString, -1) + } + // These fields were renamed, so we need to provide backward compat for + // the old names. 
+ if strings.Contains(outputFormat, ".Src") { + outputFormat = strings.Replace(outputFormat, ".Src", ".Source", -1) + } + if strings.Contains(outputFormat, ".Dst") { + outputFormat = strings.Replace(outputFormat, ".Dst", ".Destination", -1) + } + if strings.Contains(outputFormat, ".ImageID") { + outputFormat = strings.Replace(outputFormat, ".ImageID", ".Image", -1) + } + _ = outputFormat + // if latestContainer { + // lc, err := ctnrRuntime.GetLatestContainer() + // if err != nil { + // return err + // } + // args = append(args, lc.ID()) + // inspectType = inspectTypeContainer + // } + + // inspectedObjects, iterateErr := iterateInput(getContext(), c.Size, args, runtime, inspectType) + // if iterateErr != nil { + // return iterateErr + // } + // + // var out formats.Writer + // if outputFormat != "" && outputFormat != formats.JSONString { + // // template + // out = formats.StdoutTemplateArray{Output: inspectedObjects, Template: outputFormat} + // } else { + // // default is json output + // out = formats.JSONStructArray{Output: inspectedObjects} + // } + // + // return out.Out() + return nil +} diff --git a/cmd/podmanV2/images/list.go b/cmd/podmanV2/images/list.go new file mode 100644 index 000000000..cfdfaaed2 --- /dev/null +++ b/cmd/podmanV2/images/list.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman image _list_ + listCmd = &cobra.Command{ + Use: "list [flag] [IMAGE]", + Aliases: []string{"ls"}, + Short: "List images in local storage", + Long: "Lists images previously pulled to the system or created on the system.", + RunE: images, + Example: `podman image list --format json + podman image list --sort repository --format "table {{.ID}} {{.Repository}} {{.Tag}}" + podman image list --filter dangling=true`, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: listCmd, + Parent: imageCmd, + }) +} + +func images(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/main.go b/cmd/podmanV2/main.go new file mode 100644 index 000000000..0df086352 --- /dev/null +++ b/cmd/podmanV2/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "os" + "reflect" + "runtime" + + _ "github.com/containers/libpod/cmd/podmanV2/containers" + _ "github.com/containers/libpod/cmd/podmanV2/images" + _ "github.com/containers/libpod/cmd/podmanV2/networks" + _ "github.com/containers/libpod/cmd/podmanV2/pods" + "github.com/containers/libpod/cmd/podmanV2/registry" + _ "github.com/containers/libpod/cmd/podmanV2/volumes" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func init() { + if err := libpod.SetXdgDirs(); err != nil { + logrus.Errorf(err.Error()) + os.Exit(1) + } + initCobra() +} + +func initCobra() { + switch runtime.GOOS { + case "darwin": + fallthrough + case "windows": + registry.GlobalFlags.EngineMode = entities.TunnelMode + case "linux": + registry.GlobalFlags.EngineMode = entities.ABIMode + default: + logrus.Errorf("%s is not a supported OS", runtime.GOOS) + os.Exit(1) + } + + // TODO: Is there a Cobra way to "peek" at os.Args? 
+ if ok := Contains("--remote", os.Args); ok { + registry.GlobalFlags.EngineMode = entities.TunnelMode + } + + cobra.OnInitialize(func() {}) +} + +func main() { + fmt.Fprintf(os.Stderr, "Number of commands: %d\n", len(registry.Commands)) + for _, c := range registry.Commands { + if Contains(registry.GlobalFlags.EngineMode, c.Mode) { + parent := rootCmd + if c.Parent != nil { + parent = c.Parent + } + parent.AddCommand(c.Command) + } + } + + Execute() + os.Exit(0) +} + +func Contains(item interface{}, slice interface{}) bool { + s := reflect.ValueOf(slice) + + switch s.Kind() { + case reflect.Array: + fallthrough + case reflect.Slice: + break + default: + return false + } + + for i := 0; i < s.Len(); i++ { + if s.Index(i).Interface() == item { + return true + } + } + return false +} diff --git a/cmd/podmanV2/networks/network.go b/cmd/podmanV2/networks/network.go new file mode 100644 index 000000000..fc92d2321 --- /dev/null +++ b/cmd/podmanV2/networks/network.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _network_ + cmd = &cobra.Command{ + Use: "network", + Short: "Manage networks", + Long: "Manage networks", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/pods/pod.go b/cmd/podmanV2/pods/pod.go new file mode 100644 index 000000000..81c0d33e1 --- /dev/null +++ b/cmd/podmanV2/pods/pod.go @@ -0,0 +1,33 @@ +package pods + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _pod_ + podCmd = &cobra.Command{ + Use: "pod", + Short: "Manage pods", + Long: "Manage pods", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: podCmd, + }) + podCmd.SetHelpTemplate(registry.HelpTemplate()) + podCmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/pods/ps.go b/cmd/podmanV2/pods/ps.go new file mode 100644 index 000000000..d4c625b2e --- /dev/null +++ b/cmd/podmanV2/pods/ps.go @@ -0,0 +1,32 @@ +package pods + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + psDescription = "List all pods on system including their names, ids and current state." 
+ + // Command: podman pod _ps_ + psCmd = &cobra.Command{ + Use: "ps", + Aliases: []string{"ls", "list"}, + Short: "list pods", + Long: psDescription, + RunE: pods, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: psCmd, + Parent: podCmd, + }) +} + +func pods(cmd *cobra.Command, args []string) error { + return nil +} diff --git a/cmd/podmanV2/registry/registry.go b/cmd/podmanV2/registry/registry.go new file mode 100644 index 000000000..fa51d6535 --- /dev/null +++ b/cmd/podmanV2/registry/registry.go @@ -0,0 +1,96 @@ +package registry + +import ( + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type CliCommand struct { + Mode []entities.EngineMode + Command *cobra.Command + Parent *cobra.Command +} + +var ( + Commands []CliCommand + GlobalFlags entities.EngineFlags + imageEngine entities.ImageEngine + containerEngine entities.ContainerEngine + PodmanTunnel bool +) + +// HelpTemplate returns the help template for podman commands +// This uses the short and long options. +// command should not use this. +func HelpTemplate() string { + return `{{.Short}} + +Description: + {{.Long}} + +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// UsageTemplate returns the usage template for podman commands +// This blocks the displaying of the global options. The main podman +// command should not use this. +func UsageTemplate() string { + return `Usage(v2):{{if (and .Runnable (not .HasAvailableSubCommands))}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: + {{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} +{{end}} +` +} + +func ImageEngine() entities.ImageEngine { + return imageEngine +} + +// NewImageEngine is a wrapper for building an ImageEngine to be used for PreRunE functions +func NewImageEngine(cmd *cobra.Command, args []string) (entities.ImageEngine, error) { + if imageEngine == nil { + engine, err := infra.NewImageEngine(GlobalFlags.EngineMode, entities.EngineOptions{}) + if err != nil { + return nil, err + } + imageEngine = engine + } + return imageEngine, nil +} + +func ContainerEngine() entities.ContainerEngine { + return containerEngine +} + +// NewContainerEngine is a wrapper for building an ContainerEngine to be used for PreRunE functions +func NewContainerEngine(cmd *cobra.Command, args []string) (entities.ContainerEngine, error) { + if containerEngine == nil { + engine, err := infra.NewContainerEngine(GlobalFlags.EngineMode, entities.EngineOptions{}) + if err != nil { + return nil, err + } + containerEngine = engine + } + return containerEngine, nil +} + +func SubCommandExists(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + return errors.Errorf("unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information.", cmd.CommandPath(), args[0]) + } + return errors.Errorf("missing command '%[1]s COMMAND'\nTry '%[1]s --help' for more information.", cmd.CommandPath()) +} diff --git 
a/cmd/podmanV2/report/templates.go b/cmd/podmanV2/report/templates.go new file mode 100644 index 000000000..dc43d4f9b --- /dev/null +++ b/cmd/podmanV2/report/templates.go @@ -0,0 +1,61 @@ +package report + +import ( + "strings" + "text/template" + "time" + + "github.com/docker/go-units" +) + +var defaultFuncMap = template.FuncMap{ + "ellipsis": func(s string, length int) string { + if len(s) > length { + return s[:length-3] + "..." + } + return s + }, + // TODO: Remove on Go 1.14 port + "slice": func(s string, i, j int) string { + if i > j || len(s) < i { + return s + } + if len(s) < j { + return s[i:] + } + return s[i:j] + }, + "toRFC3339": func(t int64) string { + return time.Unix(t, 0).Format(time.RFC3339) + }, + "toHumanDuration": func(t int64) string { + return units.HumanDuration(time.Since(time.Unix(t, 0))) + " ago" + }, + "toHumanSize": func(sz int64) string { + return units.HumanSize(float64(sz)) + }, +} + +func ReportHeader(columns ...string) []byte { + hdr := make([]string, len(columns)) + for i, h := range columns { + hdr[i] = strings.ToUpper(h) + } + return []byte(strings.Join(hdr, "\t") + "\n") +} + +func AppendFuncMap(funcMap template.FuncMap) template.FuncMap { + merged := PodmanTemplateFuncs() + for k, v := range funcMap { + merged[k] = v + } + return merged +} + +func PodmanTemplateFuncs() template.FuncMap { + merged := make(template.FuncMap) + for k, v := range defaultFuncMap { + merged[k] = v + } + return merged +} diff --git a/cmd/podmanV2/root.go b/cmd/podmanV2/root.go new file mode 100644 index 000000000..778184f28 --- /dev/null +++ b/cmd/podmanV2/root.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + "path" + + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/version" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: path.Base(os.Args[0]), + Long: "Manage pods, containers and images", + SilenceUsage: true, + SilenceErrors: true, + TraverseChildren: true, + RunE: registry.SubCommandExists, + Version: version.Version, +} + +func init() { + // Override default --help information of `--version` global flag} + var dummyVersion bool + rootCmd.PersistentFlags().BoolVarP(&dummyVersion, "version", "v", false, "Version of podman") + rootCmd.PersistentFlags().BoolVarP(®istry.PodmanTunnel, "remote", "r", false, "Access service via SSH tunnel") +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/cmd/podmanV2/system/system.go b/cmd/podmanV2/system/system.go new file mode 100644 index 000000000..30ed328e8 --- /dev/null +++ b/cmd/podmanV2/system/system.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _system_ + cmd = &cobra.Command{ + Use: "system", + Short: "Manage podman", + Long: "Manage podman", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/cmd/podmanV2/volumes/volume.go b/cmd/podmanV2/volumes/volume.go new file mode 
100644 index 000000000..245c06da0 --- /dev/null +++ b/cmd/podmanV2/volumes/volume.go @@ -0,0 +1,33 @@ +package images + +import ( + "github.com/containers/libpod/cmd/podmanV2/registry" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _volume_ + cmd = &cobra.Command{ + Use: "volume", + Short: "Manage volumes", + Long: "Volumes are created in and can be shared between containers", + TraverseChildren: true, + PersistentPreRunE: preRunE, + RunE: registry.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: cmd, + }) + cmd.SetHelpTemplate(registry.HelpTemplate()) + cmd.SetUsageTemplate(registry.UsageTemplate()) +} + +func preRunE(cmd *cobra.Command, args []string) error { + _, err := registry.NewContainerEngine(cmd, args) + return err +} diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index 0222be7ba..635de0c7e 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -48,7 +48,7 @@ Epoch: 99 %else Epoch: 0 %endif -Version: 1.8.2 +Version: 1.8.3 Release: #COMMITDATE#.git%{shortcommit0}%{?dist} Summary: Manage Pods, Containers and Container Images License: ASL 2.0 @@ -9,7 +9,7 @@ require ( github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 github.com/containernetworking/plugins v0.8.5 - github.com/containers/buildah v1.14.3-0.20200313154200-d26f437b2a46 + github.com/containers/buildah v1.14.3 github.com/containers/common v0.5.0 github.com/containers/conmon v2.0.10+incompatible github.com/containers/image/v5 v5.2.1 @@ -71,6 +71,8 @@ github.com/containers/buildah v1.14.3-0.20200313093807-c0e60d444696 h1:TCJsENYev github.com/containers/buildah v1.14.3-0.20200313093807-c0e60d444696/go.mod h1:OCorIy7yUrQ2hIZY5z/LhJuPiH8bT8GUwC+9CarZK5o= github.com/containers/buildah v1.14.3-0.20200313154200-d26f437b2a46 h1:Zw8xYI3HATHra5Csm1k5GOXNCietwGR6D2kQVP5zw2w= github.com/containers/buildah v1.14.3-0.20200313154200-d26f437b2a46/go.mod h1:OCorIy7yUrQ2hIZY5z/LhJuPiH8bT8GUwC+9CarZK5o= +github.com/containers/buildah v1.14.3 h1:GYH/o3ME76CI0bvjFp++Fr9mOTrqvK5FLGtLkCBrKic= +github.com/containers/buildah v1.14.3/go.mod h1:OmsmT+HR5i1o2U/0qm81fvI+m71hjd57gyf02z1Q7YI= github.com/containers/common v0.4.2 h1:O5d1gj/xdpQdZi0MEivRQ/7AeRaVeHdbSP/bvShw458= github.com/containers/common v0.4.2/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys= github.com/containers/common v0.5.0 h1:ZAef7h3oO46PcbTyfooZf8XLHrYad+GkhSu3EhH6P24= @@ -371,6 +373,8 @@ github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10= github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I= github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.1.2 h1:vCO8hZQR/4uzo+j0PceBH5aKFcvCDM43UzUGOYQN+Go= +github.com/openshift/imagebuilder v1.1.2/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 
h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= diff --git a/hack/man-page-checker b/hack/man-page-checker index 528ff800a..17d85d65d 100755 --- a/hack/man-page-checker +++ b/hack/man-page-checker @@ -1,16 +1,6 @@ #!/bin/bash # -# man-page-name-checker - validate and cross-reference man page names -# -# FIXME as of 2019-03-20 there are still four files with inconsistent names: -# -# podman-logs.1.md NAME= podman-container-logs -# podman-info.1.md NAME= podman-system-info -# podman-rm.1.md NAME= podman-container-rm -# podman-rmi.1.md NAME= podman-image-rm -# -# If those four get renamed (with suitable symlink fixes), this script -# can be enabled in CI to prevent future inconsistencies. +# man-page-checker - validate and cross-reference man page names # die() { diff --git a/hack/xref-helpmsgs-manpages b/hack/xref-helpmsgs-manpages new file mode 100755 index 000000000..00db3c8de --- /dev/null +++ b/hack/xref-helpmsgs-manpages @@ -0,0 +1,307 @@ +#!/usr/bin/perl +# +# xref-helpmsgs-manpages - cross-reference --help options against man pages +# +package LibPod::CI::XrefHelpmsgsManpages; + +use v5.14; +use utf8; + +use strict; +use warnings; + +(our $ME = $0) =~ s|.*/||; +our $VERSION = '0.1'; + +# For debugging, show data structures using DumpTree($var) +#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0; + +############################################################################### +# BEGIN user-customizable section + +# Path to podman executable +my $Default_Podman = './bin/podman'; +my $PODMAN = $ENV{PODMAN} || $Default_Podman; + +# Path to podman markdown source files (of the form podman-*.1.md) +my $Markdown_Path = 'docs/source/markdown'; + +# END user-customizable section +############################################################################### + +use FindBin; + +############################################################################### +# BEGIN boilerplate args checking, usage messages + +sub usage { + print <<"END_USAGE"; +Usage: $ME [OPTIONS] + +$ME recursively runs 'podman --help' against +all subcommands; and recursively reads podman-*.1.md files +in $Markdown_Path, then cross-references that each --help +option is listed in the appropriate man page and vice-versa. + +$ME invokes '\$PODMAN' (default: $Default_Podman). + +Exit status is zero if no inconsistencies found, one otherwise + +OPTIONS: + + -v, --verbose show verbose progress indicators + -n, --dry-run make no actual changes + + --help display this message + --version display program name and version +END_USAGE + + exit; +} + +# Command-line options. Note that this operates directly on @ARGV ! +our $debug = 0; +our $verbose = 0; +sub handle_opts { + use Getopt::Long; + GetOptions( + 'debug!' => \$debug, + 'verbose|v' => \$verbose, + + help => \&usage, + version => sub { print "$ME version $VERSION\n"; exit 0 }, + ) or die "Try `$ME --help' for help\n"; +} + +# END boilerplate args checking, usage messages +############################################################################### + +############################## CODE BEGINS HERE ############################### + +# The term is "modulino". +__PACKAGE__->main() unless caller(); + +# Main code. +sub main { + # Note that we operate directly on @ARGV, not on function parameters. + # This is deliberate: it's because Getopt::Long only operates on @ARGV + # and there's no clean way to make it use @_. + handle_opts(); # will set package globals + + # Fetch command-line arguments. Barf if too many. 
+ die "$ME: Too many arguments; try $ME --help\n" if @ARGV; + + my $help = podman_help(); + my $man = podman_man('podman'); + + my $retval = xref_by_help($help, $man) + + xref_by_man($help, $man); + + exit !!$retval; +} + +################## +# xref_by_help # Find keys in '--help' but not in man +################## +sub xref_by_help { + my ($help, $man, @subcommand) = @_; + my $errs = 0; + + for my $k (sort keys %$help) { + if (exists $man->{$k}) { + if (ref $help->{$k}) { + $errs += xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k); + } + # Otherwise, non-ref is leaf node such as a --option + } + else { + my $man = $man->{_path} || 'man'; + warn "$ME: podman @subcommand --help lists $k, but $k not in $man\n"; + ++$errs; + } + } + + return $errs; +} + +################# +# xref_by_man # Find keys in man pages but not in --help +################# +# +# In an ideal world we could share the functionality in one function; but +# there are just too many special cases in man pages. +# +sub xref_by_man { + my ($help, $man, @subcommand) = @_; + + my $errs = 0; + + # FIXME: this generates way too much output + for my $k (grep { $_ ne '_path' } sort keys %$man) { + if (exists $help->{$k}) { + if (ref $man->{$k}) { + $errs += xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k); + } + } + elsif ($k ne '--help' && $k ne '-h') { + my $man = $man->{_path} || 'man'; + + # Special case: podman-inspect serves dual purpose (image, ctr) + my %ignore = map { $_ => 1 } qw(-l -s -t --latest --size --type); + next if $man =~ /-inspect/ && $ignore{$k}; + + # Special case: the 'trust' man page is a mess + next if $man =~ /-trust/; + + # Special case: '--net' is an undocumented shortcut + next if $k eq '--net' && $help->{'--network'}; + + # Special case: these are actually global options + next if $k =~ /^--(cni-config-dir|runtime)$/ && $man =~ /-build/; + + # Special case: weirdness with Cobra and global/local options + next if $k eq '--namespace' && $man =~ /-ps/; + + # Special case: these require compiling with 'varlink' tag, + # which doesn't happen in CI gating task. + next if $k eq 'varlink'; + next if "@subcommand" eq 'system' && $k eq 'service'; + + warn "$ME: podman @subcommand: $k in $man, but not --help\n"; + ++$errs; + } + } + + return $errs; +} + + +################# +# podman_help # Parse output of 'podman [subcommand] --help' +################# +sub podman_help { + my %help; + open my $fh, '-|', $PODMAN, @_, '--help' + or die "$ME: Cannot fork: $!\n"; + my $section = ''; + while (my $line = <$fh>) { + # Cobra is blessedly consistent in its output: + # Usage: ... + # Available Commands: + # .... + # Flags: + # .... + # + # Start by identifying the section we're in... + if ($line =~ /^Available\s+(Commands):/) { + $section = lc $1; + } + elsif ($line =~ /^(Flags):/) { + $section = lc $1; + } + + # ...then track commands and options. For subcommands, recurse. 
+ elsif ($section eq 'commands') { + if ($line =~ /^\s{1,4}(\S+)\s/) { + my $subcommand = $1; + print "> podman @_ $subcommand\n" if $debug; + $help{$subcommand} = podman_help(@_, $subcommand) + unless $subcommand eq 'help'; # 'help' not in man + } + } + elsif ($section eq 'flags') { + # Handle '--foo' or '-f, --foo' + if ($line =~ /^\s{1,10}(--\S+)\s/) { + print "> podman @_ $1\n" if $debug; + $help{$1} = 1; + } + elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) { + print "> podman @_ $1, $2\n" if $debug; + $help{$1} = $help{$2} = 1; + } + } + } + close $fh + or die "$ME: Error running 'podman @_ --help'\n"; + + return \%help; +} + + +################ +# podman_man # Parse contents of podman-*.1.md +################ +sub podman_man { + my $command = shift; + my $subpath = "$Markdown_Path/$command.1.md"; + my $manpath = "$FindBin::Bin/../$subpath"; + print "** $subpath \n" if $debug; + + my %man = (_path => $subpath); + open my $fh, '<', $manpath + or die "$ME: Cannot read $manpath: $!\n"; + my $section = ''; + my @most_recent_flags; + while (my $line = <$fh>) { + chomp $line; + next unless $line; # skip empty lines + + # .md files designate sections with leading double hash + if ($line =~ /^##\s*(GLOBAL\s+)?OPTIONS/) { + $section = 'flags'; + } + elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) { + $section = 'commands'; + } + elsif ($line =~ /^\#\#/) { + $section = ''; + } + + # This will be a table containing subcommand names, links to man pages. + # The format is slightly different between podman.1.md and subcommands. + elsif ($section eq 'commands') { + # In podman.1.md + if ($line =~ /^\|\s*\[podman-(\S+?)\(\d\)\]/) { + $man{$1} = podman_man("podman-$1"); + } + + # In podman-<subcommand>.1.md + elsif ($line =~ /^\|\s+(\S+)\s+\|\s+\[\S+\]\((\S+)\.1\.md\)/) { + $man{$1} = podman_man($2); + } + } + + # Flags should always be of the form '**-f**' or '**--flag**', + # possibly separated by comma-space. + elsif ($section eq 'flags') { + # e.g. 'podman run --ip6', documented in man page, but nonexistent + if ($line =~ /^not\s+implemented/i) { + delete $man{$_} for @most_recent_flags; + } + + @most_recent_flags = (); + # Handle any variation of '**--foo**, **-f**' + while ($line =~ s/^\*\*((--[a-z0-9-]+)|(-.))\*\*(,\s+)?//g) { + $man{$1} = 1; + + # Keep track of them, in case we see 'Not implemented' below + push @most_recent_flags, $1; + } + } + } + close $fh; + + # Special case: the 'image trust' man page tries hard to cover both set + # and show, which means it ends up not being machine-readable. + if ($command eq 'podman-image-trust') { + my %set = %man; + my %show = %man; + $show{$_} = 1 for qw(--raw -j --json); + return +{ set => \%set, show => \%show } + } + + return \%man; +} + + +1; diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 4918bf57a..2ce8e0de4 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -41,6 +41,8 @@ type BoltState struct { // containing the path to the container's network namespace, a dependencies // bucket containing the container's dependencies, and an optional pod key // containing the ID of the pod the container is joined to. +// After updates to include exec sessions, may also include an exec bucket +// with the IDs of exec sessions currently in use by the container. // - allCtrsBkt: Map of ID to name containing only containers. Used for // container lookup operations. // - podBkt: Contains a sub-bucket for each pod in the state. @@ -49,6 +51,10 @@ type BoltState struct { // containers in the pod. 
// - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup // operations. +// - execBkt: Map of exec session ID to exec session - contains a sub-bucket for +// each exec session in the DB. +// - execRegistryBkt: Map of exec session ID to nothing. Contains one entry for +// each exec session. Used for iterating through all exec sessions. // - runtimeConfigBkt: Contains configuration of the libpod instance that // initially created the database. This must match for any further instances // that access the database, to ensure that state mismatches with @@ -86,6 +92,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { allPodsBkt, volBkt, allVolsBkt, + execBkt, runtimeConfigBkt, } @@ -171,6 +178,11 @@ func (s *BoltState) Refresh() error { return err } + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + // Iterate through all IDs. Check if they are containers. // If they are, unmarshal their state, and then clear // PID, mountpoint, and state for all of them @@ -245,6 +257,26 @@ func (s *BoltState) Refresh() error { return errors.Wrapf(err, "error updating state for container %s in DB", string(id)) } + // Delete all exec sessions, if there are any + ctrExecBkt := ctrBkt.Bucket(execBkt) + if ctrExecBkt != nil { + // Can't delete in a ForEach, so build a list of + // what to remove then remove. + toRemove := []string{} + err = ctrExecBkt.ForEach(func(id, unused []byte) error { + toRemove = append(toRemove, string(id)) + return nil + }) + if err != nil { + return err + } + for _, execId := range toRemove { + if err := ctrExecBkt.Delete([]byte(execId)); err != nil { + return errors.Wrapf(err, "error removing exec session %s from container %s", execId, string(id)) + } + } + } + return nil }) if err != nil { @@ -285,7 +317,30 @@ func (s *BoltState) Refresh() error { return nil }) - return err + if err != nil { + return err + } + + // Now refresh exec sessions + // We want to remove them all, but for-each can't modify buckets + // So we have to make a list of what to operate on, then do the + // work. + toRemoveExec := []string{} + err = execBucket.ForEach(func(id, unused []byte) error { + toRemoveExec = append(toRemoveExec, string(id)) + return nil + }) + if err != nil { + return err + } + + for _, execSession := range toRemoveExec { + if err := execBucket.Delete([]byte(execSession)); err != nil { + return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession) + } + } + + return nil }) return err } @@ -895,6 +950,287 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) { return config, nil } +// AddExecSession adds an exec session to the state. 
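The Refresh() changes above clear stale exec sessions with a collect-then-delete pass because bbolt does not allow deleting keys from inside ForEach. A minimal sketch of that idiom (the helper name is illustrative, and the import is assumed to be bolt "go.etcd.io/bbolt"):

    // clearBucket empties bkt with the same two-pass pattern used by Refresh():
    // collect the keys during ForEach, then delete them afterwards.
    func clearBucket(bkt *bolt.Bucket) error {
        toRemove := [][]byte{}
        err := bkt.ForEach(func(k, v []byte) error {
            // Only record the key here; mutating the bucket mid-iteration is
            // not allowed. Copy it, since the slice handed to the callback is
            // owned by the transaction.
            toRemove = append(toRemove, append([]byte{}, k...))
            return nil
        })
        if err != nil {
            return err
        }
        for _, k := range toRemove {
            if err := bkt.Delete(k); err != nil {
                return err
            }
        }
        return nil
    }
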
+func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessionID := []byte(session.ID()) + + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID()) + } + + ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt) + if err != nil { + return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID()) + } + + execExists := execBucket.Get(sessionID) + if execExists != nil { + return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID()) + } + + if err := execBucket.Put(sessionID, ctrID); err != nil { + return errors.Wrapf(err, "error adding exec session %s to DB", session.ID()) + } + + if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil { + return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID()) + } + + return nil + }) + return err +} + +// GetExecSession returns the ID of the container an exec session is associated +// with. +func (s *BoltState) GetExecSession(id string) (string, error) { + if !s.valid { + return "", define.ErrDBClosed + } + + if id == "" { + return "", define.ErrEmptyID + } + + db, err := s.getDBCon() + if err != nil { + return "", err + } + defer s.deferredCloseDBCon(db) + + ctrID := "" + err = db.View(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + + ctr := execBucket.Get([]byte(id)) + if ctr == nil { + return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id) + } + ctrID = string(ctr) + return nil + }) + return ctrID, err +} + +// RemoveExecSession removes references to the given exec session in the +// database. +func (s *BoltState) RemoveExecSession(session *ExecSession) error { + if !s.valid { + return define.ErrDBClosed + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + sessionID := []byte(session.ID()) + containerID := []byte(session.ContainerID()) + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + sessionExists := execBucket.Get(sessionID) + if sessionExists == nil { + return define.ErrNoSuchExecSession + } + // Check that container ID matches + if string(sessionExists) != session.ContainerID() { + return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists)) + } + + if err := execBucket.Delete(sessionID); err != nil { + return errors.Wrapf(err, "error removing exec session %s from database", session.ID()) + } + + dbCtr := ctrBucket.Bucket(containerID) + if dbCtr == nil { + // State is inconsistent. We refer to a container that + // is no longer in the state. + // Return without error, to attempt to recover. 
+ return nil + } + + ctrExecBucket := dbCtr.Bucket(execBkt) + if ctrExecBucket == nil { + // Again, state is inconsistent. We should have an exec + // bucket, and it should have this session. + // Again, nothing we can do, so proceed and try to + // recover. + return nil + } + + ctrSessionExists := ctrExecBucket.Get(sessionID) + if ctrSessionExists != nil { + if err := ctrExecBucket.Delete(sessionID); err != nil { + return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID()) + } + } + + return nil + }) + return err +} + +// GetContainerExecSessions retrieves the IDs of all exec sessions running in a +// container that the database is aware of (IE, were added via AddExecSession). +func (s *BoltState) GetContainerExecSessions(ctr *Container) ([]string, error) { + if !s.valid { + return nil, define.ErrDBClosed + } + + if !ctr.valid { + return nil, define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return nil, err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessions := []string{} + err = db.View(func(tx *bolt.Tx) error { + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrExecSessions := dbCtr.Bucket(execBkt) + if ctrExecSessions == nil { + return nil + } + + return ctrExecSessions.ForEach(func(id, unused []byte) error { + sessions = append(sessions, string(id)) + return nil + }) + }) + if err != nil { + return nil, err + } + + return sessions, nil +} + +// RemoveContainerExecSessions removes all exec sessions attached to a given +// container. +func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + ctrID := []byte(ctr.ID()) + sessions := []string{} + + err = db.Update(func(tx *bolt.Tx) error { + execBucket, err := getExecBucket(tx) + if err != nil { + return err + } + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrExecSessions := dbCtr.Bucket(execBkt) + if ctrExecSessions == nil { + return nil + } + + err = ctrExecSessions.ForEach(func(id, unused []byte) error { + sessions = append(sessions, string(id)) + return nil + }) + if err != nil { + return err + } + + for _, session := range sessions { + if err := ctrExecSessions.Delete([]byte(session)); err != nil { + return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session) + } + // Check if the session exists in the global table + // before removing. It should, but in cases where the DB + // has become inconsistent, we should try and proceed + // so we can recover. 
+ sessionExists := execBucket.Get([]byte(session)) + if sessionExists == nil { + continue + } + if string(sessionExists) != ctr.ID() { + return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists)) + } + if err := execBucket.Delete([]byte(session)); err != nil { + return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session) + } + } + + return nil + }) + return err +} + // RewriteContainerConfig rewrites a container's configuration. // WARNING: This function is DANGEROUS. Do not use without reading the full // comment on this function in state.go. diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 3f09305f5..1c4c9e12d 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -24,6 +24,7 @@ const ( allPodsName = "allPods" volName = "vol" allVolsName = "allVolumes" + execName = "exec" runtimeConfigName = "runtime-config" configName = "config" @@ -54,6 +55,7 @@ var ( allPodsBkt = []byte(allPodsName) volBkt = []byte(volName) allVolsBkt = []byte(allVolsName) + execBkt = []byte(execName) runtimeConfigBkt = []byte(runtimeConfigName) configKey = []byte(configName) @@ -339,6 +341,14 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) { return bkt, nil } +func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) { + bkt := tx.Bucket(execBkt) + if bkt == nil { + return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB") + } + return bkt, nil +} + func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) { bkt := tx.Bucket(runtimeConfigBkt) if bkt == nil { @@ -787,6 +797,23 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } } + // Does the container have exec sessions? + ctrExecSessionsBkt := ctrExists.Bucket(execBkt) + if ctrExecSessionsBkt != nil { + sessions := []string{} + err = ctrExecSessionsBkt.ForEach(func(id, value []byte) error { + sessions = append(sessions, string(id)) + + return nil + }) + if err != nil { + return err + } + if len(sessions) > 0 { + return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", ")) + } + } + // Does the container have dependencies? ctrDepsBkt := ctrExists.Bucket(dependenciesBkt) if ctrDepsBkt == nil { diff --git a/libpod/common_test.go b/libpod/common_test.go index 63ea4f41b..747252cf4 100644 --- a/libpod/common_test.go +++ b/libpod/common_test.go @@ -58,14 +58,12 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error) PID: 1234, ExecSessions: map[string]*ExecSession{ "abcd": { - ID: "1", - Command: []string{"2", "3"}, - PID: 9876, + Id: "1", + PID: 9876, }, "ef01": { - ID: "5", - Command: []string{"hello", "world"}, - PID: 46765, + Id: "5", + PID: 46765, }, }, BindMounts: map[string]string{ diff --git a/libpod/container.go b/libpod/container.go index d83de93bb..e59fb9fe8 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -181,9 +181,13 @@ type ContainerState struct { PID int `json:"pid,omitempty"` // ConmonPID is the PID of the container's conmon ConmonPID int `json:"conmonPid,omitempty"` - // ExecSessions contains active exec sessions for container - // Exec session ID is mapped to PID of exec process - ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"` + // ExecSessions contains all exec sessions that are associated with this + // container. 
+ ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"` + // LegacyExecSessions are legacy exec sessions from older versions of + // Podman. + // These are DEPRECATED and will be removed in a future release. + LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"` // NetworkStatus contains the configuration results for all networks // the pod is attached to. Only populated if we created a network // namespace for the container, and the network namespace is currently @@ -214,13 +218,6 @@ type ContainerState struct { containerPlatformState } -// ExecSession contains information on an active exec session -type ExecSession struct { - ID string `json:"id"` - Command []string `json:"command"` - PID int `json:"pid"` -} - // ContainerConfig contains all information that was used to create the // container. It may not be changed once created. // It is stored, read-only, on disk @@ -944,13 +941,13 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) { session, ok := c.state.ExecSessions[id] if !ok { - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID()) + return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID()) } returnSession := new(ExecSession) - returnSession.ID = session.ID - returnSession.Command = session.Command - returnSession.PID = session.PID + if err := JSONDeepCopy(session, returnSession); err != nil { + return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID()) + } return returnSession, nil } diff --git a/libpod/container_api.go b/libpod/container_api.go index 039619ea6..967180437 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -9,10 +9,8 @@ import ( "os" "time" - "github.com/containers/common/pkg/capabilities" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" - "github.com/containers/storage/pkg/stringid" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -215,142 +213,6 @@ func (c *Container) Kill(signal uint) error { return c.save() } -// Exec starts a new process inside the container -// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of define.ExecErrorCodeCannotInvoke is returned. -// If another generic error happens, an exit code of define.ExecErrorCodeGeneric is returned. -// Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit code is the exit code of the call. -// Otherwise, the exit code will be the exit code of the executed call inside of the container. 
-// TODO investigate allowing exec without attaching -func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs uint, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) { - var capList []string - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - - if err := c.syncContainer(); err != nil { - return define.ExecErrorCodeCannotInvoke, err - } - } - - if c.state.State != define.ContainerStateRunning { - return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running") - } - - if privileged || c.config.Privileged { - capList = capabilities.AllCapabilities() - } - - // Generate exec session ID - // Ensure we don't conflict with an existing session ID - sessionID := stringid.GenerateNonCryptoID() - found := true - // This really ought to be a do-while, but Go doesn't have those... - for found { - found = false - for id := range c.state.ExecSessions { - if id == sessionID { - found = true - break - } - } - if found { - sessionID = stringid.GenerateNonCryptoID() - } - } - - logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID) - if err := c.createExecBundle(sessionID); err != nil { - return define.ExecErrorCodeCannotInvoke, err - } - - defer func() { - // cleanup exec bundle - if err := c.cleanupExecBundle(sessionID); err != nil { - logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err) - } - }() - - opts := new(ExecOptions) - opts.Cmd = cmd - opts.CapAdd = capList - opts.Env = env - opts.Terminal = tty - opts.Cwd = workDir - opts.User = user - opts.Streams = streams - opts.PreserveFDs = preserveFDs - opts.Resize = resize - opts.DetachKeys = detachKeys - - pid, attachChan, err := c.ociRuntime.ExecContainer(c, sessionID, opts) - if err != nil { - ec := define.ExecErrorCodeGeneric - // Conmon will pass a non-zero exit code from the runtime as a pid here. - // we differentiate a pid with an exit code by sending it as negative, so reverse - // that change and return the exit code the runtime failed with. 
- if pid < 0 { - ec = -1 * pid - } - return ec, err - } - - // We have the PID, add it to state - if c.state.ExecSessions == nil { - c.state.ExecSessions = make(map[string]*ExecSession) - } - session := new(ExecSession) - session.ID = sessionID - session.Command = cmd - session.PID = pid - c.state.ExecSessions[sessionID] = session - if err := c.save(); err != nil { - // Now we have a PID but we can't save it in the DB - // TODO handle this better - return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID()) - } - c.newContainerEvent(events.Exec) - logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID()) - - // Unlock so other processes can use the container - if !c.batched { - c.lock.Unlock() - } - - lastErr := <-attachChan - - exitCode, err := c.readExecExitCode(sessionID) - if err != nil { - if lastErr != nil { - logrus.Errorf(lastErr.Error()) - } - lastErr = err - } - if exitCode != 0 { - if lastErr != nil { - logrus.Errorf(lastErr.Error()) - } - lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode) - } - - // Lock again - if !c.batched { - c.lock.Lock() - } - - // Sync the container again to pick up changes in state - if err := c.syncContainer(); err != nil { - logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID) - return exitCode, lastErr - } - - // Remove the exec session from state - delete(c.state.ExecSessions, sessionID) - if err := c.save(); err != nil { - logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err) - } - return exitCode, lastErr -} - // AttachStreams contains streams that will be attached to the container type AttachStreams struct { // OutputStream will be attached to container's STDOUT @@ -493,7 +355,11 @@ func (c *Container) Unmount(force bool) error { if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID()) } - if len(c.state.ExecSessions) != 0 { + execSessions, err := c.getActiveExecSessions() + if err != nil { + return err + } + if len(execSessions) != 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID()) } return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID()) @@ -674,15 +540,15 @@ func (c *Container) Cleanup(ctx context.Context) error { // If we didn't restart, we perform a normal cleanup - // Reap exec sessions first. - if err := c.reapExecSessions(); err != nil { + // Check for running exec sessions + sessions, err := c.getActiveExecSessions() + if err != nil { return err } - - // Check if we have active exec sessions after reaping. - if len(c.state.ExecSessions) != 0 { + if len(sessions) > 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID()) } + defer c.newContainerEvent(events.Cleanup) return c.cleanup(ctx) } @@ -757,114 +623,11 @@ func (c *Container) Sync() error { return nil } -// Refresh refreshes a container's state in the database, restarting the -// container if it is running +// Refresh is DEPRECATED and REMOVED. 
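The ContainerState change in container.go above is what keeps old on-disk state readable: the legacy "execSessions" JSON key now decodes into LegacyExecSessions, while new-style sessions are written under "newExecSessions". A self-contained sketch with stand-in types, purely to illustrate the tag swap:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Stand-in types mirroring the relevant ContainerState fields.
    type execSession struct {
        Id  string `json:"id"`
        PID int    `json:"pid,omitempty"`
    }

    type containerState struct {
        ExecSessions       map[string]*execSession `json:"newExecSessions,omitempty"`
        LegacyExecSessions map[string]*execSession `json:"execSessions,omitempty"`
    }

    func main() {
        // State written by an older Podman only carries the "execSessions" key...
        old := []byte(`{"execSessions":{"abcd":{"id":"1","pid":9876}}}`)
        var st containerState
        if err := json.Unmarshal(old, &st); err != nil {
            panic(err)
        }
        // ...so it lands in LegacyExecSessions; ExecSessions stays empty.
        fmt.Println(len(st.LegacyExecSessions), len(st.ExecSessions)) // prints: 1 0
    }
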
func (c *Container) Refresh(ctx context.Context) error { - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - - if err := c.syncContainer(); err != nil { - return err - } - } - - if c.state.State == define.ContainerStateRemoving { - return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed") - } - - wasCreated := false - if c.state.State == define.ContainerStateCreated { - wasCreated = true - } - wasRunning := false - if c.state.State == define.ContainerStateRunning { - wasRunning = true - } - wasPaused := false - if c.state.State == define.ContainerStatePaused { - wasPaused = true - } - - // First, unpause the container if it's paused - if c.state.State == define.ContainerStatePaused { - if err := c.unpause(); err != nil { - return err - } - } - - // Next, if the container is running, stop it - if c.state.State == define.ContainerStateRunning { - if err := c.stop(c.config.StopTimeout); err != nil { - return err - } - } - - // If there are active exec sessions, we need to kill them - if len(c.state.ExecSessions) > 0 { - logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.", - len(c.state.ExecSessions), c.ID()) - } - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } - } - - // If the container is in ContainerStateStopped, we need to delete it - // from the runtime and clear conmon state - if c.state.State == define.ContainerStateStopped { - if err := c.delete(ctx); err != nil { - return err - } - if err := c.removeConmonFiles(); err != nil { - return err - } - } - - // Fire cleanup code one more time unconditionally to ensure we are good - // to refresh - if err := c.cleanup(ctx); err != nil { - return err - } - - logrus.Debugf("Resetting state of container %s", c.ID()) - - // We've finished unwinding the container back to its initial state - // Now safe to refresh container state - if err := resetState(c.state); err != nil { - return errors.Wrapf(err, "error resetting state of container %s", c.ID()) - } - if err := c.refresh(); err != nil { - return err - } - - logrus.Debugf("Successfully refresh container %s state", c.ID()) - - // Initialize the container if it was created in runc - if wasCreated || wasRunning || wasPaused { - if err := c.prepare(); err != nil { - return err - } - if err := c.init(ctx, false); err != nil { - return err - } - } - - // If the container was running before, start it - if wasRunning || wasPaused { - if err := c.start(); err != nil { - return err - } - } - - // If the container was paused before, re-pause it - if wasPaused { - if err := c.pause(); err != nil { - return err - } - } - return nil + // This has been deprecated for a long while, and is in the process of + // being removed. 
+ return define.ErrNotImplemented } // ContainerCheckpointOptions is a struct used to pass the parameters diff --git a/libpod/container_exec.go b/libpod/container_exec.go new file mode 100644 index 000000000..7ed7a3343 --- /dev/null +++ b/libpod/container_exec.go @@ -0,0 +1,857 @@ +package libpod + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/containers/common/pkg/capabilities" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/events" + "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/client-go/tools/remotecommand" +) + +// ExecConfig contains the configuration of an exec session +type ExecConfig struct { + // Command the the command that will be invoked in the exec session. + // Must not be empty. + Command []string `json:"command"` + // Terminal is whether the exec session will allocate a pseudoterminal. + Terminal bool `json:"terminal,omitempty"` + // AttachStdin is whether the STDIN stream will be forwarded to the exec + // session's first process when attaching. Only available if Terminal is + // false. + AttachStdin bool `json:"attachStdin,omitempty"` + // AttachStdout is whether the STDOUT stream will be forwarded to the + // exec session's first process when attaching. Only available if + // Terminal is false. + AttachStdout bool `json:"attachStdout,omitempty"` + // AttachStderr is whether the STDERR stream will be forwarded to the + // exec session's first process when attaching. Only available if + // Terminal is false. + AttachStderr bool `json:"attachStderr,omitempty"` + // DetachKeys are keys that will be used to detach from the exec + // session. Here, nil will use the default detach keys, where a pointer + // to the empty string ("") will disable detaching via detach keys. + DetachKeys *string `json:"detachKeys,omitempty"` + // Environment is a set of environment variables that will be set for + // the first process started by the exec session. + Environment map[string]string `json:"environment,omitempty"` + // Privileged is whether the exec session will be privileged - that is, + // will be granted additional capabilities. + Privileged bool `json:"privileged,omitempty"` + // User is the user the exec session will be run as. + // If set to "" the exec session will be started as the same user the + // container was started as. + User string `json:"user,omitempty"` + // WorkDir is the working directory for the first process that will be + // launched by the exec session. + // If set to "" the exec session will be started in / within the + // container. + WorkDir string `json:"workDir,omitempty"` + // PreserveFDs indicates that a number of extra FDs from the process + // running libpod will be passed into the container. These are assumed + // to begin at 3 (immediately after the standard streams). The number + // given is the number that will be passed into the exec session, + // starting at 3. + PreserveFDs uint `json:"preserveFds,omitempty"` +} + +// ExecSession contains information on a single exec session attached to a given +// container. +type ExecSession struct { + // Id is the ID of the exec session. + // Named somewhat strangely to not conflict with ID(). + Id string `json:"id"` + // ContainerId is the ID of the container this exec session belongs to. + // Named somewhat strangely to not conflict with ContainerID(). + ContainerId string `json:"containerId"` + + // State is the state of the exec session. 
+ State define.ContainerExecStatus `json:"state"` + // PID is the PID of the process created by the exec session. + PID int `json:"pid,omitempty"` + // ExitCode is the exit code of the exec session, if it has exited. + ExitCode int `json:"exitCode,omitempty"` + + // Config is the configuration of this exec session. + // Cannot be empty. + Config *ExecConfig `json:"config"` +} + +// ID returns the ID of an exec session. +func (e *ExecSession) ID() string { + return e.Id +} + +// ContainerID returns the ID of the container this exec session was started in. +func (e *ExecSession) ContainerID() string { + return e.ContainerId +} + +// InspectExecSession contains information about a given exec session. +type InspectExecSession struct { + // CanRemove is legacy and used purely for compatibility reasons. + // Will always be set to true, unless the exec session is running. + CanRemove bool `json:"CanRemove"` + // ContainerID is the ID of the container this exec session is attached + // to. + ContainerID string `json:"ContainerID"` + // DetachKeys are the detach keys used by the exec session. + // If set to "" the default keys are being used. + // Will show "<none>" if no detach keys are set. + DetachKeys string `json:"DetachKeys"` + // ExitCode is the exit code of the exec session. Will be set to 0 if + // the exec session has not yet exited. + ExitCode int `json:"ExitCode"` + // ID is the ID of the exec session. + ID string `json:"ID"` + // OpenStderr is whether the container's STDERR stream will be attached. + // Always set to true if the exec session created a TTY. + OpenStderr bool `json:"OpenStderr"` + // OpenStdin is whether the container's STDIN stream will be attached + // to. + OpenStdin bool `json:"OpenStdin"` + // OpenStdout is whether the container's STDOUT stream will be attached. + // Always set to true if the exec session created a TTY. + OpenStdout bool `json:"OpenStdout"` + // Running is whether the exec session is running. + Running bool `json:"Running"` + // Pid is the PID of the exec session's process. + // Will be set to 0 if the exec session is not running. + Pid int `json:"Pid"` + // ProcessConfig contains information about the exec session's process. + ProcessConfig *InspectExecProcess `json:"ProcessConfig"` +} + +// InspectExecProcess contains information about the process in a given exec +// session. +type InspectExecProcess struct { + // Arguments are the arguments to the entrypoint command of the exec + // session. + Arguments []string `json:"arguments"` + // Entrypoint is the entrypoint for the exec session (the command that + // will be executed in the container). + Entrypoint string `json:"entrypoint"` + // Privileged is whether the exec session will be started with elevated + // privileges. + Privileged bool `json:"privileged"` + // Tty is whether the exec session created a terminal. + Tty bool `json:"tty"` + // User is the user the exec session was started as. + User string `json:"user"` +} + +// Inspect inspects the given exec session and produces detailed output on its +// configuration and current state. 
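ExecConfig above replaces the long positional parameter list of the removed Container.Exec(); the callers updated later in this diff (execPS and runHealthCheck) show the migration, roughly as follows, with c, cmd, and streams coming from the surrounding caller:

    // Before (removed in container_api.go above): every knob was positional.
    ec, err := c.Exec(false, false, map[string]string{}, cmd, "", "", streams, 0, nil, "")

    // After: options travel in an ExecConfig; unused knobs keep their zero
    // values, and the resize channel is passed separately (nil here).
    config := new(ExecConfig)
    config.Command = cmd
    ec, err := c.Exec(config, streams, nil)
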
+func (e *ExecSession) Inspect() (*InspectExecSession, error) { + if e.Config == nil { + return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block") + } + + output := new(InspectExecSession) + output.CanRemove = e.State != define.ExecStateRunning + output.ContainerID = e.ContainerId + if e.Config.DetachKeys != nil { + output.DetachKeys = *e.Config.DetachKeys + } + output.ExitCode = e.ExitCode + output.ID = e.Id + output.OpenStderr = e.Config.AttachStderr + output.OpenStdin = e.Config.AttachStdin + output.OpenStdout = e.Config.AttachStdout + output.Running = e.State == define.ExecStateRunning + output.Pid = e.PID + output.ProcessConfig = new(InspectExecProcess) + if len(e.Config.Command) > 0 { + output.ProcessConfig.Entrypoint = e.Config.Command[0] + if len(e.Config.Command) > 1 { + output.ProcessConfig.Arguments = make([]string, 0, len(e.Config.Command)-1) + output.ProcessConfig.Arguments = append(output.ProcessConfig.Arguments, e.Config.Command[1:]...) + } + } + output.ProcessConfig.Privileged = e.Config.Privileged + output.ProcessConfig.Tty = e.Config.Terminal + output.ProcessConfig.User = e.Config.User + + return output, nil +} + +// legacyExecSession contains information on an active exec session. It is a +// holdover from a previous Podman version and is DEPRECATED. +type legacyExecSession struct { + ID string `json:"id"` + Command []string `json:"command"` + PID int `json:"pid"` +} + +// ExecCreate creates a new exec session for the container. +// The session is not started. The ID of the new exec session will be returned. +func (c *Container) ExecCreate(config *ExecConfig) (string, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return "", err + } + } + + // Verify our config + if config == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate") + } + if len(config.Command) == 0 { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session") + } + if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) { + return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal") + } + + // Generate an ID for our new exec session + sessionID := stringid.GenerateNonCryptoID() + found := true + // This really ought to be a do-while, but Go doesn't have those... 
+ for found { + found = false + for id := range c.state.ExecSessions { + if id == sessionID { + found = true + break + } + } + if found { + sessionID = stringid.GenerateNonCryptoID() + } + } + + // Make our new exec session + session := new(ExecSession) + session.Id = sessionID + session.ContainerId = c.ID() + session.State = define.ExecStateCreated + session.Config = new(ExecConfig) + if err := JSONDeepCopy(config, session.Config); err != nil { + return "", errors.Wrapf(err, "error copying exec configuration into exec session") + } + + if c.state.ExecSessions == nil { + c.state.ExecSessions = make(map[string]*ExecSession) + } + + // Need to add to container state and exec session registry + c.state.ExecSessions[session.ID()] = session + if err := c.save(); err != nil { + return "", err + } + if err := c.runtime.state.AddExecSession(c, session); err != nil { + return "", err + } + + logrus.Infof("Created exec session %s in container %s", session.ID(), c.ID()) + + return sessionID, nil +} + +// ExecStart starts an exec session in the container, but does not attach to it. +// Returns immediately upon starting the exec session. +func (c *Container) ExecStart(sessionID string) error { + // Will be implemented in part 2, migrating Start and implementing + // detached Start. + return define.ErrNotImplemented +} + +// ExecStartAndAttach starts and attaches to an exec session in a container. +// TODO: Should we include detach keys in the signature to allow override? +// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr? +func (c *Container) ExecStartAndAttach(sessionID string, streams *AttachStreams) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateCreated { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) + + // TODO: check logic here - should we set Privileged if the container is + // privileged? 
+ var capList []string + if session.Config.Privileged || c.config.Privileged { + capList = capabilities.AllCapabilities() + } + + user := c.config.User + if session.Config.User != "" { + user = session.Config.User + } + + if err := c.createExecBundle(session.ID()); err != nil { + return err + } + + opts := new(ExecOptions) + opts.Cmd = session.Config.Command + opts.CapAdd = capList + opts.Env = session.Config.Environment + opts.Terminal = session.Config.Terminal + opts.Cwd = session.Config.WorkDir + opts.User = user + opts.Streams = streams + opts.PreserveFDs = session.Config.PreserveFDs + opts.DetachKeys = session.Config.DetachKeys + + pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts) + if err != nil { + return err + } + + c.newContainerEvent(events.Exec) + logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID()) + + var lastErr error + + // Update and save session to reflect PID/running + session.PID = pid + session.State = define.ExecStateRunning + + if err := c.save(); err != nil { + lastErr = err + } + + // Unlock so other processes can use the container + if !c.batched { + c.lock.Unlock() + } + + tmpErr := <-attachChan + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = tmpErr + + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + + // Lock again + if !c.batched { + c.lock.Lock() + } + + // Sync the container to pick up state changes + if err := c.syncContainer(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID()) + } + + // Update status + // Since we did a syncContainer, the old session has been overwritten. + // Grab a fresh one from the database. + session, ok = c.state.ExecSessions[sessionID] + if !ok { + // Exec session already removed. + logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID) + return nil + } + session.State = define.ExecStateStopped + session.ExitCode = exitCode + session.PID = 0 + + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + // Clean up after ourselves + if err := c.cleanupExecBundle(session.ID()); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} + +// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session. +func (c *Container) ExecHTTPStartAndAttach(sessionID string) error { + // Will be implemented in part 2, migrating Start. + return define.ErrNotImplemented +} + +// ExecStop stops an exec session in the container. +// If a timeout is provided, it will be used; otherwise, the timeout will +// default to the stop timeout of the container. +// Cleanup will be invoked automatically once the session is stopped. 
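Taken together, these methods give callers a create/start/inspect/remove lifecycle; the compatibility Exec() wrapper defined further down in this file strings them together roughly like this (ctr and streams are assumed to come from the caller, and error handling is elided):

    config := &ExecConfig{Command: []string{"ls", "/"}}
    sessionID, err := ctr.ExecCreate(config)          // registers the session in container state and the DB
    err = ctr.ExecStartAndAttach(sessionID, streams)  // blocks until the session exits
    session, err := ctr.ExecSession(sessionID)        // deep copy carrying the exit code
    exitCode := session.ExitCode
    err = ctr.ExecRemove(sessionID, false)            // drops the session from state and the DB
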
+func (c *Container) ExecStop(sessionID string, timeout *uint) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID()) + + finalTimeout := c.StopTimeout() + if timeout != nil { + finalTimeout = *timeout + } + + // Stop the session + if err := c.ociRuntime.ExecStopContainer(c, session.ID(), finalTimeout); err != nil { + return err + } + + var cleanupErr error + + // Retrieve exit code and update status + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + cleanupErr = err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr) + } + cleanupErr = err + } + + if err := c.cleanupExecBundle(session.ID()); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr) + } + cleanupErr = err + } + + return cleanupErr +} + +// ExecCleanup cleans up an exec session in the container, removing temporary +// files associated with it. +func (c *Container) ExecCleanup(sessionID string) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State == define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID()) + } + + logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID()) + + return c.cleanupExecBundle(session.ID()) +} + +// ExecRemove removes an exec session in the container. +// If force is given, the session will be stopped first if it is running. +func (c *Container) ExecRemove(sessionID string, force bool) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID()) + + // Update status of exec session if running, so we cna check if it + // stopped in the meantime. + if session.State == define.ExecStateRunning { + stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) + if err != nil { + return err + } + if stopped { + session.State = define.ExecStateStopped + // TODO: should we retrieve exit code here? + // TODO: Might be worth saving state here. 
+ } + } + + if session.State == define.ExecStateRunning { + if !force { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID()) + } + + // Stop the session + if err := c.ociRuntime.ExecStopContainer(c, session.ID(), c.StopTimeout()); err != nil { + return err + } + + if err := c.cleanupExecBundle(session.ID()); err != nil { + return err + } + } + + // First remove exec session from DB. + if err := c.runtime.state.RemoveExecSession(session); err != nil { + return err + } + // Next, remove it from the container and save state + delete(c.state.ExecSessions, sessionID) + if err := c.save(); err != nil { + return err + } + + logrus.Debugf("Successfully removed container %s exec session %s", c.ID(), session.ID()) + + return nil +} + +// ExecResize resizes the TTY of the given exec session. Only available if the +// exec session created a TTY. +func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalSize) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID()) + + if session.State != define.ExecStateRunning { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID()) + } + + return c.ociRuntime.ExecAttachResize(c, sessionID, newSize) +} + +// Exec emulates the old Libpod exec API, providing a single call to create, +// run, and remove an exec session. Returns exit code and error. Exit code is +// not guaranteed to be set sanely if error is not nil. +func (c *Container) Exec(config *ExecConfig, streams *AttachStreams, resize <-chan remotecommand.TerminalSize) (int, error) { + sessionID, err := c.ExecCreate(config) + if err != nil { + return -1, err + } + if err := c.ExecStartAndAttach(sessionID, streams); err != nil { + return -1, err + } + + // Start resizing if we have a resize channel. + // This goroutine may likely leak, given that we cannot close it here. + // Not a big deal, since it should run for as long as the Podman process + // does. Could be a big deal for `podman service` but we don't need this + // API there. + // TODO: Refactor so this is closed here, before we remove the exec + // session. + if resize != nil { + go func() { + for resizeRequest := range resize { + if err := c.ExecResize(sessionID, resizeRequest); err != nil { + // Assume the exec session went down. 
+ logrus.Warnf("Error resizing exec session %s: %v", sessionID, err) + return + } + } + }() + } + + session, err := c.ExecSession(sessionID) + if err != nil { + return -1, err + } + exitCode := session.ExitCode + if err := c.ExecRemove(sessionID, false); err != nil { + return -1, err + } + + if exitCode != 0 { + return exitCode, errors.Wrapf(define.ErrOCIRuntime, "exec session exited with non-zero exit code %d", exitCode) + } + + return exitCode, nil +} + +// cleanup an exec session after its done +func (c *Container) cleanupExecBundle(sessionID string) error { + if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + + return c.ociRuntime.ExecContainerCleanup(c, sessionID) +} + +// the path to a containers exec session bundle +func (c *Container) execBundlePath(sessionID string) string { + return filepath.Join(c.bundlePath(), sessionID) +} + +// Get PID file path for a container's exec session +func (c *Container) execPidPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_pid") +} + +// the log path for an exec session +func (c *Container) execLogPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_log") +} + +// the socket conmon creates for an exec session +func (c *Container) execAttachSocketPath(sessionID string) (string, error) { + return c.ociRuntime.ExecAttachSocketPath(c, sessionID) +} + +// execExitFileDir gets the path to the container's exit file +func (c *Container) execExitFileDir(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exit") +} + +// execOCILog returns the file path for the exec sessions oci log +func (c *Container) execOCILog(sessionID string) string { + if !c.ociRuntime.SupportsJSONErrors() { + return "" + } + return filepath.Join(c.execBundlePath(sessionID), "oci-log") +} + +// create a bundle path and associated files for an exec session +func (c *Container) createExecBundle(sessionID string) (err error) { + bundlePath := c.execBundlePath(sessionID) + if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { + return createErr + } + defer func() { + if err != nil { + if err2 := os.RemoveAll(bundlePath); err != nil { + logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) + } + } + }() + if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { + // The directory is allowed to exist + if !os.IsExist(err2) { + err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) + } + } + return +} + +// readExecExitCode reads the exit file for an exec session and returns +// the exit code +func (c *Container) readExecExitCode(sessionID string) (int, error) { + exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) + chWait := make(chan error) + defer close(chWait) + + _, err := WaitForFile(exitFile, chWait, time.Second*5) + if err != nil { + return -1, err + } + ec, err := ioutil.ReadFile(exitFile) + if err != nil { + return -1, err + } + ecInt, err := strconv.Atoi(string(ec)) + if err != nil { + return -1, err + } + return ecInt, nil +} + +// getExecSessionPID gets the PID of an active exec session +func (c *Container) getExecSessionPID(sessionID string) (int, error) { + session, ok := c.state.ExecSessions[sessionID] + if ok { + return session.PID, nil + } + oldSession, ok := c.state.LegacyExecSessions[sessionID] + if ok { + return oldSession.PID, nil + } + + return -1, 
errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID()) +} + +// getKnownExecSessions gets a list of all exec sessions we think are running, +// but does not verify their current state. +// Please use getActiveExecSessions() outside of container_exec.go, as this +// function performs further checks to return an accurate list. +func (c *Container) getKnownExecSessions() []string { + knownSessions := []string{} + // First check legacy sessions. + // TODO: This is DEPRECATED and will be removed in a future major + // release. + for sessionID := range c.state.LegacyExecSessions { + knownSessions = append(knownSessions, sessionID) + } + // Next check new exec sessions, but only if in running state + for sessionID, session := range c.state.ExecSessions { + if session.State == define.ExecStateRunning { + knownSessions = append(knownSessions, sessionID) + } + } + + return knownSessions +} + +// getActiveExecSessions checks if there are any active exec sessions in the +// current container. Returns an array of active exec sessions. +// Will continue through errors where possible. +// Currently handles both new and legacy, deprecated exec sessions. +func (c *Container) getActiveExecSessions() ([]string, error) { + activeSessions := []string{} + knownSessions := c.getKnownExecSessions() + + // Instead of saving once per iteration, do it once at the end. + var lastErr error + needSave := false + for _, id := range knownSessions { + alive, err := c.ociRuntime.ExecUpdateStatus(c, id) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + if !alive { + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + _, isLegacy := c.state.LegacyExecSessions[id] + if isLegacy { + delete(c.state.LegacyExecSessions, id) + needSave = true + } else { + session := c.state.ExecSessions[id] + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + needSave = true + } + } else { + activeSessions = append(activeSessions, id) + } + } + if needSave { + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) + } + lastErr = err + } + } + + return activeSessions, lastErr +} + +// removeAllExecSessions stops and removes all the container's exec sessions +func (c *Container) removeAllExecSessions() error { + knownSessions := c.getKnownExecSessions() + + var lastErr error + for _, id := range knownSessions { + if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + } + // Delete all exec sessions + if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + 
c.state.ExecSessions = nil + c.state.LegacyExecSessions = nil + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} diff --git a/libpod/container_internal.go b/libpod/container_internal.go index a0805c1fa..12a13a0ce 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -142,92 +142,6 @@ func (c *Container) exitFilePath() (string, error) { return c.ociRuntime.ExitFilePath(c) } -// create a bundle path and associated files for an exec session -func (c *Container) createExecBundle(sessionID string) (err error) { - bundlePath := c.execBundlePath(sessionID) - if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { - return createErr - } - defer func() { - if err != nil { - if err2 := os.RemoveAll(bundlePath); err != nil { - logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) - } - } - }() - if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { - // The directory is allowed to exist - if !os.IsExist(err2) { - err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) - } - } - return -} - -// cleanup an exec session after its done -func (c *Container) cleanupExecBundle(sessionID string) error { - if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { - return err - } - - return c.ociRuntime.ExecContainerCleanup(c, sessionID) -} - -// the path to a containers exec session bundle -func (c *Container) execBundlePath(sessionID string) string { - return filepath.Join(c.bundlePath(), sessionID) -} - -// Get PID file path for a container's exec session -func (c *Container) execPidPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_pid") -} - -// the log path for an exec session -func (c *Container) execLogPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_log") -} - -// the socket conmon creates for an exec session -func (c *Container) execAttachSocketPath(sessionID string) (string, error) { - return c.ociRuntime.ExecAttachSocketPath(c, sessionID) -} - -// execExitFileDir gets the path to the container's exit file -func (c *Container) execExitFileDir(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exit") -} - -// execOCILog returns the file path for the exec sessions oci log -func (c *Container) execOCILog(sessionID string) string { - if !c.ociRuntime.SupportsJSONErrors() { - return "" - } - return filepath.Join(c.execBundlePath(sessionID), "oci-log") -} - -// readExecExitCode reads the exit file for an exec session and returns -// the exit code -func (c *Container) readExecExitCode(sessionID string) (int, error) { - exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) - chWait := make(chan error) - defer close(chWait) - - _, err := WaitForFile(exitFile, chWait, time.Second*5) - if err != nil { - return -1, err - } - ec, err := ioutil.ReadFile(exitFile) - if err != nil { - return -1, err - } - ecInt, err := strconv.Atoi(string(ec)) - if err != nil { - return -1, err - } - return ecInt, nil -} - // Wait for the container's exit file to appear. // When it does, update our state based on it. 
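For reference, the per-session bundle helpers that moved from container_internal.go into container_exec.go above lay files out as follows (with <bundle> standing for the container's bundle directory):

    <bundle>/<sessionID>/              execBundlePath
    <bundle>/<sessionID>/exec_pid      execPidPath
    <bundle>/<sessionID>/exec_log      execLogPath
    <bundle>/<sessionID>/exit/<ctrID>  exit file read by readExecExitCode
    <bundle>/<sessionID>/oci-log       execOCILog (only when the OCI runtime supports JSON errors)
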
func (c *Container) waitForExitFileAndSync() error { @@ -568,6 +482,7 @@ func resetState(state *ContainerState) error { state.State = define.ContainerStateConfigured } state.ExecSessions = make(map[string]*ExecSession) + state.LegacyExecSessions = nil state.NetworkStatus = nil state.BindMounts = make(map[string]string) state.StoppedByUser = false @@ -1814,12 +1729,12 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } - // Reap exec sessions - if err := c.reapExecSessions(); err != nil { + // Check exec sessions + sessions, err := c.getActiveExecSessions() + if err != nil { return err } - - if len(c.state.ExecSessions) != 0 { + if len(sessions) != 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID()) } @@ -1926,41 +1841,6 @@ func (c *Container) checkExitFile() error { return c.handleExitFile(exitFile, info) } -// Reap dead exec sessions -func (c *Container) reapExecSessions() error { - // Instead of saving once per iteration, use a defer to do it once at - // the end. - var lastErr error - needSave := false - for id := range c.state.ExecSessions { - alive, err := c.ociRuntime.ExecUpdateStatus(c, id) - if err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - continue - } - if !alive { - // Clean up lingering files and remove the exec session - if err := c.ociRuntime.ExecContainerCleanup(c, id); err != nil { - return errors.Wrapf(err, "error cleaning up container %s exec session %s files", c.ID(), id) - } - delete(c.state.ExecSessions, id) - needSave = true - } - } - if needSave { - if err := c.save(); err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - } - } - return lastErr -} - func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool { if c.config.Spec == nil || c.config.Spec.Linux == nil { return false diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index 98edc340a..2a35a2ae9 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -134,7 +134,9 @@ func (c *Container) execPS(args []string) ([]string, error) { }() cmd := append([]string{"ps"}, args...) - ec, err := c.Exec(false, false, map[string]string{}, cmd, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = cmd + ec, err := c.Exec(config, streams, nil) if err != nil { return nil, err } else if ec != 0 { diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go index e7d258e21..6da49a594 100644 --- a/libpod/define/containerstate.go +++ b/libpod/define/containerstate.go @@ -78,3 +78,37 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) } } + +// ContainerExecStatus is the status of an exec session within a container. +type ContainerExecStatus int + +const ( + // ExecStateUnknown indicates that the state of the exec session is not + // known. 
+ ExecStateUnknown ContainerExecStatus = iota + // ExecStateCreated indicates that the exec session has been created but + // not yet started + ExecStateCreated ContainerExecStatus = iota + // ExecStateRunning indicates that the exec session has been started but + // has not yet exited. + ExecStateRunning ContainerExecStatus = iota + // ExecStateStopped indicates that the exec session has stopped and is + // no longer running. + ExecStateStopped ContainerExecStatus = iota +) + +// String returns a string representation of a given exec state. +func (s ContainerExecStatus) String() string { + switch s { + case ExecStateUnknown: + return "unknown" + case ExecStateCreated: + return "created" + case ExecStateRunning: + return "running" + case ExecStateStopped: + return "stopped" + default: + return "bad state" + } +} diff --git a/libpod/define/errors.go b/libpod/define/errors.go index b79cf08dc..3ba343789 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -20,6 +20,10 @@ var ( // ErrNoSuchVolume indicates the requested volume does not exist ErrNoSuchVolume = errors.New("no such volume") + // ErrNoSuchExecSession indicates that the requested exec session does + // not exist. + ErrNoSuchExecSession = errors.New("no such exec session") + // ErrCtrExists indicates a container with the same name or ID already // exists ErrCtrExists = errors.New("container already exists") @@ -29,10 +33,16 @@ var ( ErrImageExists = errors.New("image already exists") // ErrVolumeExists indicates a volume with the same name already exists ErrVolumeExists = errors.New("volume already exists") + // ErrExecSessionExists indicates an exec session with the same ID + // already exists. + ErrExecSessionExists = errors.New("exec session already exists") // ErrCtrStateInvalid indicates a container is in an improper state for // the requested operation ErrCtrStateInvalid = errors.New("container state improper") + // ErrExecSessionStateInvalid indicates that an exec session is in an + // improper state for the requested operation + ErrExecSessionStateInvalid = errors.New("exec session state improper") // ErrVolumeBeingUsed indicates that a volume is being used by at least one container ErrVolumeBeingUsed = errors.New("volume is being used") @@ -90,6 +100,9 @@ var ( // ErrVolumeRemoved indicates that the volume has already been removed and // no further operations can be performed on it ErrVolumeRemoved = errors.New("volume has already been removed") + // ErrExecSessionRemoved indicates that the exec session has already + // been removed and no further operations can be performed on it. 
+ ErrExecSessionRemoved = errors.New("exec session has already been removed") // ErrDBClosed indicates that the connection to the state database has // already been closed diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 9c274c4f3..76b7a1fcf 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -143,7 +143,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID()) timeStart := time.Now() hcResult := HealthCheckSuccess - _, hcErr := c.Exec(false, false, map[string]string{}, newCommand, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = newCommand + _, hcErr := c.Exec(config, streams, nil) if hcErr != nil { errCause := errors.Cause(hcErr) hcResult = HealthCheckFailure diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 2144671a5..ca562ab7e 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -20,10 +20,16 @@ type InMemoryState struct { pods map[string]*Pod // Maps container ID to container struct. containers map[string]*Container - volumes map[string]*Volume + // Maps volume ID to volume struct + volumes map[string]*Volume + // Maps exec session ID to ID of associated container + execSessions map[string]string // Maps container ID to a list of IDs of dependencies. - ctrDepends map[string][]string + ctrDepends map[string][]string + // Maps volume ID to IDs of dependencies volumeDepends map[string][]string + // Maps container ID to IDs of associated exec sessions. + ctrExecSessions map[string][]string // Maps pod ID to a map of container ID to container struct. podContainers map[string]map[string]*Container // Global name registry - ensures name uniqueness and performs lookups. 
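As an aside on the exec-session sentinels added to libpod/define/errors.go above: elsewhere in this codebase sentinel errors are compared only after unwrapping with errors.Cause from github.com/pkg/errors, so a consumer of the new state calls would presumably match them roughly as in the sketch below (the helper name is made up, not part of this patch):

    package example

    import (
        "github.com/containers/libpod/libpod/define"
        "github.com/pkg/errors"
    )

    // execSessionGone reports whether err means the exec session no longer
    // exists. Sketch only: unwrap with errors.Cause before comparing, as the
    // rest of libpod does for its sentinel errors.
    func execSessionGone(err error) bool {
        cause := errors.Cause(err)
        return cause == define.ErrNoSuchExecSession || cause == define.ErrExecSessionRemoved
    }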
@@ -51,10 +57,13 @@ func NewInMemoryState() (State, error) { state.pods = make(map[string]*Pod) state.containers = make(map[string]*Container) state.volumes = make(map[string]*Volume) + state.execSessions = make(map[string]string) state.ctrDepends = make(map[string][]string) state.volumeDepends = make(map[string][]string) + state.ctrExecSessions = make(map[string][]string) + state.podContainers = make(map[string]map[string]*Container) state.nameIndex = registrar.NewRegistrar() @@ -316,6 +325,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error { return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } + // Ensure we don't have active exec sessions + ctrSessions := s.ctrExecSessions[ctr.ID()] + if len(ctrSessions) > 0 { + sessStr := strings.Join(ctrSessions, ", ") + return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr) + } + if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID()) @@ -437,6 +453,117 @@ func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error) return ctr.Config(), nil } +// Add an exec session to the database +func (s *InMemoryState) AddExecSession(ctr *Container, session *ExecSession) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + if session.ContainerID() != ctr.ID() { + return errors.Wrapf(define.ErrInvalidArg, "container ID and exec session ID must match") + } + if _, ok := s.containers[ctr.ID()]; !ok { + return define.ErrNoSuchCtr + } + + if _, ok := s.execSessions[session.ID()]; ok { + return define.ErrExecSessionExists + } + + s.execSessions[session.ID()] = ctr.ID() + + ctrSessions, ok := s.ctrExecSessions[ctr.ID()] + if !ok { + ctrSessions = []string{} + } + + ctrSessions = append(ctrSessions, session.ID()) + s.ctrExecSessions[ctr.ID()] = ctrSessions + + return nil +} + +// Get an exec session from the database by full or partial ID. +func (s *InMemoryState) GetExecSession(id string) (string, error) { + if id == "" { + return "", define.ErrEmptyID + } + + session, ok := s.execSessions[id] + if !ok { + return "", define.ErrNoSuchExecSession + } + + return session, nil +} + +// RemoveExecSession removes an exec session from the database. +func (s *InMemoryState) RemoveExecSession(session *ExecSession) error { + if _, ok := s.execSessions[session.ID()]; !ok { + return define.ErrNoSuchExecSession + } + + ctrSessions, ok := s.ctrExecSessions[session.ContainerID()] + // If !ok - internal state seems inconsistent, but the thing we wanted + // to remove is gone. Continue. + if ok { + newSessions := []string{} + for _, sess := range ctrSessions { + if sess != session.ID() { + newSessions = append(newSessions, sess) + } + } + s.ctrExecSessions[session.ContainerID()] = newSessions + } + + delete(s.execSessions, session.ID()) + + return nil +} + +// GetContainerExecSessions retrieves all exec sessions for the given container. +func (s *InMemoryState) GetContainerExecSessions(ctr *Container) ([]string, error) { + if !ctr.valid { + return nil, define.ErrCtrRemoved + } + if _, ok := s.containers[ctr.ID()]; !ok { + ctr.valid = false + return nil, define.ErrNoSuchCtr + } + + ctrSessions := s.ctrExecSessions[ctr.ID()] + + return ctrSessions, nil +} + +// RemoveContainerExecSessions removes all exec sessions for the given +// container. 
+func (s *InMemoryState) RemoveContainerExecSessions(ctr *Container) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + if _, ok := s.containers[ctr.ID()]; !ok { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrSessions, ok := s.ctrExecSessions[ctr.ID()] + if !ok { + return nil + } + + for _, sess := range ctrSessions { + if _, ok := s.execSessions[sess]; !ok { + // We have an internal state inconsistency + // Error out + return errors.Wrapf(define.ErrInternal, "inconsistent database state: exec session %s is missing", sess) + } + delete(s.execSessions, sess) + } + delete(s.ctrExecSessions, ctr.ID()) + + return nil +} + // RewriteContainerConfig rewrites a container's configuration. // This function is DANGEROUS, even with an in-memory state. // Please read the full comment on it in state.go before using it. @@ -1056,6 +1183,13 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } + // Ensure we don't have active exec sessions + ctrSessions := s.ctrExecSessions[ctr.ID()] + if len(ctrSessions) > 0 { + sessStr := strings.Join(ctrSessions, ", ") + return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr) + } + // Retrieve pod containers podCtrs, ok := s.podContainers[pod.ID()] if !ok { diff --git a/libpod/oci.go b/libpod/oci.go index 27edebefc..ef46cf5c3 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -71,6 +71,9 @@ type OCIRuntime interface { // Returns an int (exit code), error channel (errors from attach), and // error (errors that occurred attempting to start the exec session). ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) + // ExecAttachResize resizes the terminal of a running exec session. Only + // allowed with sessions that were created with a TTY. + ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error // ExecStopContainer stops a given exec session in a running container. // SIGTERM with be sent initially, then SIGKILL after the given timeout. // If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will @@ -143,12 +146,12 @@ type ExecOptions struct { // to 0, 1, 2) that will be passed to the executed process. The total FDs // passed will be 3 + PreserveFDs. PreserveFDs uint - // Resize is a channel where terminal resize events are sent to be - // handled. - Resize chan remotecommand.TerminalSize // DetachKeys is a set of keys that, when pressed in sequence, will // detach from the container. - DetachKeys string + // If not provided, the default keys will be used. + // If provided but set to "", detaching from the container will be + // disabled. + DetachKeys *string } // HTTPAttachStreams informs the HTTPAttach endpoint which of the container's diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index 46c70e7eb..5fc46c31c 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -93,7 +93,7 @@ func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan re // 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go // 5. 
child receives on startFd, runs the runtime exec command // attachToExec is responsible for closing startFd and attachFd -func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd, attachFd *os.File) error { +func (c *Container) attachToExec(streams *AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File) error { if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput { return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") } @@ -104,7 +104,11 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c defer errorhandling.CloseQuiet(startFd) defer errorhandling.CloseQuiet(attachFd) - detachKeys, err := processDetachKeys(keys) + detachString := define.DefaultDetachKeys + if keys != nil { + detachString = *keys + } + detachKeys, err := processDetachKeys(detachString) if err != nil { return err } @@ -134,10 +138,6 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c } }() - // Register the resize func after we've read the attach socket, as we know at this point the - // 'ctl' file has been created in conmon - registerResizeFunc(resize, c.execBundlePath(sessionID)) - // start listening on stdio of the process receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys) diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index d3c3bbcc5..82c5d7020 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -769,7 +769,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options attachChan := make(chan error) go func() { // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe) + attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe) close(attachChan) }() attachToExecCalled = true @@ -783,37 +783,54 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options return pid, attachChan, err } +// ExecAttachResize resizes the TTY of the given exec session. +func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + // TODO: probably want a dedicated function to get ctl file path? + controlPath := filepath.Join(ctr.execBundlePath(sessionID), "ctl") + controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0) + if err != nil { + return errors.Wrapf(err, "could not open ctl file for terminal resize for container %s exec session %s", ctr.ID(), sessionID) + } + defer controlFile.Close() + + logrus.Debugf("Received a resize event for container %s exec session %s: %+v", ctr.ID(), sessionID, newSize) + if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil { + return errors.Wrapf(err, "failed to write to ctl file to resize terminal") + } + + return nil +} + // ExecStopContainer stops a given exec session in a running container. 
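A note on the DetachKeys change above: the field is now a tri-state pointer, and the same convention is carried on the new ExecConfig used by callers elsewhere in this patch. The sketch below spells out the three cases; the command and the key sequence are only example values, not something this patch prescribes:

    package example

    import "github.com/containers/libpod/libpod"

    // detachKeyCases illustrates the tri-state convention: nil falls back to the
    // default detach sequence, a pointer to "" disables detaching entirely, and
    // any other value is used verbatim.
    func detachKeyCases() []*libpod.ExecConfig {
        disabled := ""
        custom := "ctrl-p,ctrl-q" // example sequence only
        return []*libpod.ExecConfig{
            {Command: []string{"ps"}},                        // DetachKeys nil -> default keys
            {Command: []string{"ps"}, DetachKeys: &disabled}, // ""  -> detaching disabled
            {Command: []string{"ps"}, DetachKeys: &custom},   // custom sequence used as-is
        }
    }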
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error { - session, ok := ctr.state.ExecSessions[sessionID] - if !ok { - // TODO This should probably be a separate error - return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID()) + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return err } logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID) // Is the session dead? // Ping the PID with signal 0 to see if it still exists. - if err := unix.Kill(session.PID, 0); err != nil { + if err := unix.Kill(pid, 0); err != nil { if err == unix.ESRCH { return nil } - return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID) + return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) } if timeout > 0 { // Use SIGTERM by default, then SIGSTOP after timeout. - logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID()) - if err := unix.Kill(session.PID, unix.SIGTERM); err != nil { + logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID()) + if err := unix.Kill(pid, unix.SIGTERM); err != nil { if err == unix.ESRCH { return nil } - return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID) + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid) } // Wait for the PID to stop - if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil { + if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil { logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID) } else { // No error, container is dead @@ -822,17 +839,17 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t } // SIGTERM did not work. On to SIGKILL. - logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID()) - if err := unix.Kill(session.PID, unix.SIGTERM); err != nil { + logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID()) + if err := unix.Kill(pid, unix.SIGTERM); err != nil { if err == unix.ESRCH { return nil } - return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID) + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid) } // Wait for the PID to stop - if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil { - return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID) + if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil { + return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid) } return nil @@ -840,21 +857,20 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t // ExecUpdateStatus checks if the given exec session is still running. 
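For reference, ExecAttachResize above delivers a resize to conmon as a single line on the session's ctl file. The sketch below merely reproduces that message format; the 24x80 size used when calling it would be an arbitrary example:

    package example

    import "fmt"

    // resizeMessage builds the line written to conmon's ctl file: message type 1
    // (terminal resize), then rows, then columns, e.g. "1 24 80\n".
    func resizeMessage(height, width uint16) string {
        return fmt.Sprintf("%d %d %d\n", 1, height, width)
    }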
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) { - session, ok := ctr.state.ExecSessions[sessionID] - if !ok { - // TODO This should probably be a separate error - return false, errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID()) + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return false, err } logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID) // Is the session dead? // Ping the PID with signal 0 to see if it still exists. - if err := unix.Kill(session.PID, 0); err != nil { + if err := unix.Kill(pid, 0); err != nil { if err == unix.ESRCH { return false, nil } - return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID) + return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) } return true, nil diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go index ff7eea625..326591d89 100644 --- a/libpod/oci_missing.go +++ b/libpod/oci_missing.go @@ -125,6 +125,11 @@ func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options return -1, nil, r.printError() } +// ExecAttachResize is not available as the runtime is missing. +func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + return r.printError() +} + // ExecStopContainer is not available as the runtime is missing. // TODO: We can also investigate using unix.Kill() on the PID of the exec // session here if we want to make stopping containers possible. Won't be diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 1f1cdc271..0b18436ca 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -463,11 +463,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Check that all of our exec sessions have finished - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } + // Remove all active exec sessions + if err := c.removeAllExecSessions(); err != nil { + return err } // Check that no other containers depend on the container. @@ -484,9 +482,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Set ContainerStateRemoving and remove exec sessions + // Set ContainerStateRemoving c.state.State = define.ContainerStateRemoving - c.state.ExecSessions = nil if err := c.save(); err != nil { return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go index e1dc31391..be566e211 100644 --- a/libpod/runtime_pod.go +++ b/libpod/runtime_pod.go @@ -90,18 +90,10 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { // output. 
Multiple filters are handled by ANDing their output, so only pods // matching all filters are returned func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, define.ErrRuntimeStopped - } - - pods, err := r.state.AllPods() + pods, err := r.GetAllPods() if err != nil { return nil, err } - podsFiltered := make([]*Pod, 0, len(pods)) for _, pod := range pods { include := true diff --git a/libpod/state.go b/libpod/state.go index b246b5eac..9690e5819 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -72,6 +72,8 @@ type State interface { // Removes container from state. // Containers that are part of pods must use RemoveContainerFromPod. // The container must be part of the set namespace. + // All dependencies must be removed first. + // All exec sessions referencing the container must be removed first. RemoveContainer(ctr *Container) error // UpdateContainer updates a container's state from the backing store. // The container must be part of the set namespace. @@ -95,6 +97,30 @@ type State interface { // Return a container config from the database by full ID GetContainerConfig(id string) (*ContainerConfig, error) + // Add creates a reference to an exec session in the database. + // The container the exec session is attached to will be recorded. + // The container state will not be modified. + // The actual exec session itself is part of the container's state. + // We assume higher-level callers will add the session by saving the + // container's state before calling this. This only ensures that the ID + // of the exec session is associated with the ID of the container. + // Implementations may, but are not required to, verify that the state + // of the given container has an exec session with the ID given. + AddExecSession(ctr *Container, session *ExecSession) error + // Get retrieves the container a given exec session is attached to. + GetExecSession(id string) (string, error) + // Remove a reference to an exec session from the database. + // This will not modify container state to remove the exec session there + // and instead only removes the session ID -> container ID reference + // added by AddExecSession. + RemoveExecSession(session *ExecSession) error + // Get the IDs of all exec sessions attached to a given container. + GetContainerExecSessions(ctr *Container) ([]string, error) + // Remove all exec sessions for a single container. + // Usually used as part of removing the container. + // As with RemoveExecSession, container state will not be modified. + RemoveContainerExecSessions(ctr *Container) error + // PLEASE READ FULL DESCRIPTION BEFORE USING. // Rewrite a container's configuration. // This function breaks libpod's normal prohibition on a read-only diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go index dc856cc8d..1417bd2b9 100644 --- a/pkg/adapter/pods.go +++ b/pkg/adapter/pods.go @@ -122,19 +122,31 @@ func (r *LocalRuntime) GetLatestPod() (*Pod, error) { return &pod, err } +// GetPodsWithFilters gets the filtered list of pods based on the filter parameters provided. 
+func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { + pods, err := shared.GetPodsWithFilters(r.Runtime, filters) + if err != nil { + return nil, err + } + return r.podstoAdapterPods(pods) +} + +func (r *LocalRuntime) podstoAdapterPods(pod []*libpod.Pod) ([]*Pod, error) { + var pods []*Pod + for _, i := range pod { + + pods = append(pods, &Pod{i}) + } + return pods, nil +} + // GetAllPods gets all pods and wraps it in an adapter pod func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { - var pods []*Pod allPods, err := r.Runtime.GetAllPods() if err != nil { return nil, err } - for _, p := range allPods { - pod := Pod{} - pod.Pod = p - pods = append(pods, &pod) - } - return pods, nil + return r.podstoAdapterPods(allPods) } // LookupPod gets a pod by name or id and wraps it in an adapter pod diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go index 20f089628..6b8f22f15 100644 --- a/pkg/adapter/pods_remote.go +++ b/pkg/adapter/pods_remote.go @@ -10,7 +10,7 @@ import ( "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/cmd/podman/varlink" + iopodman "github.com/containers/libpod/cmd/podman/varlink" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/varlinkapi" @@ -208,6 +208,11 @@ func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { return pods, nil } +// This is a empty implementation stating remoteclient not yet implemented +func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { + return nil, define.ErrNotImplemented +} + // GetPodsByStatus returns a slice of pods filtered by a libpod status func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) { podIDs, err := iopodman.GetPodsByStatus().Call(r.Conn, statuses) diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go index 3dc5864e2..ef5a6f926 100644 --- a/pkg/adapter/terminal_linux.go +++ b/pkg/adapter/terminal_linux.go @@ -16,7 +16,6 @@ import ( // ExecAttachCtr execs and attaches to a container func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { resize := make(chan remotecommand.TerminalSize) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) // Check if we are attached to a terminal. 
If we are, generate resize @@ -33,7 +32,18 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b } }() } - return ctr.Exec(tty, privileged, env, cmd, user, workDir, streams, preserveFDs, resize, detachKeys) + + execConfig := new(libpod.ExecConfig) + execConfig.Command = cmd + execConfig.Terminal = tty + execConfig.Privileged = privileged + execConfig.Environment = env + execConfig.User = user + execConfig.WorkDir = workDir + execConfig.DetachKeys = &detachKeys + execConfig.PreserveFDs = preserveFDs + + return ctr.Exec(execConfig, streams, resize) } // StartAttachCtr starts and (if required) attaches to a container diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index 1298e7fa4..3a269fe50 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -87,7 +87,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - if _, found := r.URL.Query()["limit"]; found { + if _, found := r.URL.Query()["limit"]; found && query.Limit != -1 { last := query.Limit if len(containers) > last { containers = containers[len(containers)-last:] @@ -326,7 +326,6 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { builder.WriteRune(' ') } builder.WriteString(line.Msg) - // Build header and output entry binary.BigEndian.PutUint32(header[4:], uint32(len(header)+builder.Len())) if _, err := w.Write(header[:]); err != nil { @@ -335,7 +334,6 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { if _, err := fmt.Fprint(w, builder.String()); err != nil { log.Errorf("unable to write builder string: %q", err) } - if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } diff --git a/pkg/api/handlers/compat/images_history.go b/pkg/api/handlers/compat/images_history.go index 04304caa4..afadf4c48 100644 --- a/pkg/api/handlers/compat/images_history.go +++ b/pkg/api/handlers/compat/images_history.go @@ -28,7 +28,7 @@ func HistoryImage(w http.ResponseWriter, r *http.Request) { for _, h := range history { l := handlers.HistoryResponse{ ID: h.ID, - Created: h.Created.UnixNano(), + Created: h.Created.Unix(), CreatedBy: h.CreatedBy, Tags: h.Tags, Size: h.Size, diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go index f93c8f8d5..27ec64d89 100644 --- a/pkg/api/handlers/libpod/pods.go +++ b/pkg/api/handlers/libpod/pods.go @@ -103,7 +103,6 @@ func PodCreate(w http.ResponseWriter, r *http.Request) { func Pods(w http.ResponseWriter, r *http.Request) { var ( - runtime = r.Context().Value("runtime").(*libpod.Runtime) podInspectData []*libpod.PodInspect ) decoder := r.Context().Value("decoder").(*schema.Decoder) @@ -118,12 +117,8 @@ func Pods(w http.ResponseWriter, r *http.Request) { return } - if len(query.Filters) > 0 { - utils.Error(w, "filters are not implemented yet", http.StatusInternalServerError, define.ErrNotImplemented) - return - } + pods, err := utils.GetPods(w, r) - pods, err := runtime.GetAllPods() if err != nil { utils.Error(w, "Something went wrong", http.StatusInternalServerError, err) return diff --git a/pkg/api/handlers/libpod/swagger.go b/pkg/api/handlers/libpod/swagger.go index f6a26134b..149fa10dc 100644 --- a/pkg/api/handlers/libpod/swagger.go +++ b/pkg/api/handlers/libpod/swagger.go @@ -1,6 +1,16 @@ package libpod -import "github.com/containers/image/v5/manifest" +import ( + "net/http" + "os" + + "github.com/containers/image/v5/manifest" + "github.com/containers/libpod/pkg/api/handlers/utils" + 
"github.com/pkg/errors" +) + +// DefaultPodmanSwaggerSpec provides the default path to the podman swagger spec file +const DefaultPodmanSwaggerSpec = "/usr/share/containers/podman/swagger.yaml" // List Containers // swagger:response ListContainers @@ -15,3 +25,20 @@ type swagInspectManifestResponse struct { // in:body Body manifest.List } + +func ServeSwagger(w http.ResponseWriter, r *http.Request) { + path := DefaultPodmanSwaggerSpec + if p, found := os.LookupEnv("PODMAN_SWAGGER_SPEC"); found { + path = p + } + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + utils.InternalServerError(w, errors.Errorf("file %q does not exist", path)) + return + } + utils.InternalServerError(w, err) + return + } + w.Header().Set("Content-Type", "text/yaml") + http.ServeFile(w, r, path) +} diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go index d5a79bdc8..bbe4cee3c 100644 --- a/pkg/api/handlers/utils/containers.go +++ b/pkg/api/handlers/utils/containers.go @@ -16,7 +16,7 @@ import ( // ContainerCreateResponse is the response struct for creating a container type ContainerCreateResponse struct { // ID of the container created - ID string `json:"id"` + ID string `json:"Id"` // Warnings during container creation Warnings []string `json:"Warnings"` } diff --git a/pkg/api/handlers/utils/pods.go b/pkg/api/handlers/utils/pods.go new file mode 100644 index 000000000..266ad9a4b --- /dev/null +++ b/pkg/api/handlers/utils/pods.go @@ -0,0 +1,45 @@ +package utils + +import ( + "fmt" + "net/http" + + "github.com/containers/libpod/cmd/podman/shared" + "github.com/containers/libpod/libpod" + "github.com/gorilla/schema" +) + +func GetPods(w http.ResponseWriter, r *http.Request) ([]*libpod.Pod, error) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + + query := struct { + All bool + Filters map[string][]string `schema:"filters"` + Digests bool + }{} + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + return nil, err + } + var filters = []string{} + if _, found := r.URL.Query()["digests"]; found && query.Digests { + UnSupportedParameter("digests") + } + + if len(query.Filters) > 0 { + for k, v := range query.Filters { + for _, val := range v { + filters = append(filters, fmt.Sprintf("%s=%s", k, val)) + } + } + filterFuncs, err := shared.GenerateFilterFunction(runtime, filters) + if err != nil { + return nil, err + } + return shared.FilterAllPodsWithFilterFunc(runtime, filterFuncs...) 
+ } + + return runtime.GetAllPods() + +} diff --git a/pkg/api/server/register_events.go b/pkg/api/server/register_events.go index b0f403709..e909303da 100644 --- a/pkg/api/server/register_events.go +++ b/pkg/api/server/register_events.go @@ -63,6 +63,6 @@ func (s *APIServer) registerEventsHandlers(r *mux.Router) error { // description: returns a string of json data describing an event // 500: // "$ref": "#/responses/InternalError" - r.Handle(VersionedPath("/events"), s.APIHandler(compat.GetEvents)).Methods(http.MethodGet) + r.Handle(VersionedPath("/libpod/events"), s.APIHandler(compat.GetEvents)).Methods(http.MethodGet) return nil } diff --git a/pkg/api/server/register_manifest.go b/pkg/api/server/register_manifest.go index ccfc2192d..8fd84f205 100644 --- a/pkg/api/server/register_manifest.go +++ b/pkg/api/server/register_manifest.go @@ -38,7 +38,7 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/libpod/manifests/create"), s.APIHandler(libpod.ManifestCreate)).Methods(http.MethodPost) - // swagger:operation GET /libpod/manifests/{name}/json manifests Inspect + // swagger:operation GET /libpod/manifests/{name:.*}/json manifests Inspect // --- // summary: Inspect // description: Display a manifest list @@ -46,7 +46,7 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // - application/json // parameters: // - in: path - // name: name + // name: name:.* // type: string // required: true // description: the name or ID of the manifest @@ -58,14 +58,14 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/libpod/manifests/{name:.*}/json"), s.APIHandler(libpod.ManifestInspect)).Methods(http.MethodGet) - // swagger:operation POST /libpod/manifests/{name}/add manifests AddManifest + // swagger:operation POST /libpod/manifests/{name:.*}/add manifests AddManifest // --- // description: Add an image to a manifest list // produces: // - application/json // parameters: // - in: path - // name: name + // name: name:.* // type: string // required: true // description: the name or ID of the manifest @@ -84,7 +84,7 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/libpod/manifests/{name:.*}/add"), s.APIHandler(libpod.ManifestAdd)).Methods(http.MethodPost) - // swagger:operation DELETE /libpod/manifests/{name} manifests RemoveManifest + // swagger:operation DELETE /libpod/manifests/{name:.*} manifests RemoveManifest // --- // summary: Remove // description: Remove an image from a manifest list @@ -92,7 +92,7 @@ func (s *APIServer) registerManifestHandlers(r *mux.Router) error { // - application/json // parameters: // - in: path - // name: name + // name: name:.* // type: string // required: true // description: the image associated with the manifest diff --git a/pkg/api/server/register_swagger.go b/pkg/api/server/register_swagger.go index 5564ec096..9048c1951 100644 --- a/pkg/api/server/register_swagger.go +++ b/pkg/api/server/register_swagger.go @@ -2,25 +2,14 @@ package server import ( "net/http" - "os" + "github.com/containers/libpod/pkg/api/handlers/libpod" "github.com/gorilla/mux" ) -// DefaultPodmanSwaggerSpec provides the default path to the podman swagger spec file -const DefaultPodmanSwaggerSpec = "/usr/share/containers/podman/swagger.yaml" - // RegisterSwaggerHandlers maps the swagger endpoint for the server func (s 
*APIServer) RegisterSwaggerHandlers(r *mux.Router) error { // This handler does _*NOT*_ provide an UI rather just a swagger spec that an UI could render - r.PathPrefix("/swagger/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - path := DefaultPodmanSwaggerSpec - if p, found := os.LookupEnv("PODMAN_SWAGGER_SPEC"); found { - path = p - } - w.Header().Set("Content-Type", "text/yaml") - - http.ServeFile(w, r, path) - }) + r.HandleFunc(VersionedPath("/libpod/swagger"), s.APIHandler(libpod.ServeSwagger)).Methods(http.MethodGet) return nil } diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go index 8496cd11c..59f1f95cb 100644 --- a/pkg/api/server/server.go +++ b/pkg/api/server/server.go @@ -83,10 +83,6 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li ConnectionCh: make(chan int), } - server.Timer = time.AfterFunc(server.Duration, func() { - server.ConnectionCh <- NOOPHandler - }) - router.NotFoundHandler = http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { // We can track user errors... @@ -109,6 +105,7 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li server.registerPingHandlers, server.registerPluginsHandlers, server.registerPodsHandlers, + server.RegisterSwaggerHandlers, server.registerSwarmHandlers, server.registerSystemHandlers, server.registerVersionHandlers, @@ -139,36 +136,15 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li // Serve starts responding to HTTP requests func (s *APIServer) Serve() error { - // stalker to count the connections. Should the timer expire it will shutdown the service. - go func() { - for delta := range s.ConnectionCh { - switch delta { - case EnterHandler: - s.Timer.Stop() - s.ActiveConnections += 1 - s.TotalConnections += 1 - case ExitHandler: - s.Timer.Stop() - s.ActiveConnections -= 1 - if s.ActiveConnections == 0 { - // Server will be shutdown iff the timer expires before being reset or stopped - s.Timer = time.AfterFunc(s.Duration, func() { - if err := s.Shutdown(); err != nil { - logrus.Errorf("Failed to shutdown APIServer: %v", err) - os.Exit(1) - } - }) - } else { - s.Timer.Reset(s.Duration) - } - case NOOPHandler: - // push the check out another duration... - s.Timer.Reset(s.Duration) - default: - logrus.Errorf("ConnectionCh received unsupported input %d", delta) - } - } - }() + // This is initialized here as Timer is not needed until Serve'ing + if s.Duration > 0 { + s.Timer = time.AfterFunc(s.Duration, func() { + s.ConnectionCh <- NOOPHandler + }) + go s.ReadChannelWithTimeout() + } else { + go s.ReadChannelNoTimeout() + } sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) @@ -193,6 +169,53 @@ func (s *APIServer) Serve() error { return nil } +func (s *APIServer) ReadChannelWithTimeout() { + // stalker to count the connections. Should the timer expire it will shutdown the service. + for delta := range s.ConnectionCh { + switch delta { + case EnterHandler: + s.Timer.Stop() + s.ActiveConnections += 1 + s.TotalConnections += 1 + case ExitHandler: + s.Timer.Stop() + s.ActiveConnections -= 1 + if s.ActiveConnections == 0 { + // Server will be shutdown iff the timer expires before being reset or stopped + s.Timer = time.AfterFunc(s.Duration, func() { + if err := s.Shutdown(); err != nil { + logrus.Errorf("Failed to shutdown APIServer: %v", err) + os.Exit(1) + } + }) + } else { + s.Timer.Reset(s.Duration) + } + case NOOPHandler: + // push the check out another duration... 
+ s.Timer.Reset(s.Duration) + default: + logrus.Warnf("ConnectionCh received unsupported input %d", delta) + } + } +} + +func (s *APIServer) ReadChannelNoTimeout() { + // stalker to count the connections. + for delta := range s.ConnectionCh { + switch delta { + case EnterHandler: + s.ActiveConnections += 1 + s.TotalConnections += 1 + case ExitHandler: + s.ActiveConnections -= 1 + case NOOPHandler: + default: + logrus.Warnf("ConnectionCh received unsupported input %d", delta) + } + } +} + // Shutdown is a clean shutdown waiting on existing clients func (s *APIServer) Shutdown() error { // Duration == 0 flags no auto-shutdown of the server diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go index e83c4a5e1..4b07847d1 100644 --- a/pkg/bindings/bindings.go +++ b/pkg/bindings/bindings.go @@ -7,3 +7,12 @@ // is established, users can then manage the Podman container runtime. package bindings + +var ( + // PTrue is a convenience variable that can be used in bindings where + // a pointer to a bool (optional parameter) is required. + PTrue bool = true + // PFalse is a convenience variable that can be used in bindings where + // a pointer to a bool (optional parameter) is required. + PFalse bool = false +) diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go index ba5f9c3aa..289debd8c 100644 --- a/pkg/bindings/connection.go +++ b/pkg/bindings/connection.go @@ -109,7 +109,7 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context } client, err = tcpClient(_url) default: - return nil, errors.Errorf("%s is not a support schema", _url.Scheme) + return nil, errors.Errorf("'%s' is not a supported schema", _url.Scheme) } if err != nil { return nil, errors.Wrapf(err, "Failed to create %sClient", _url.Scheme) diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go index 670321f21..f298dbba1 100644 --- a/pkg/bindings/containers/containers.go +++ b/pkg/bindings/containers/containers.go @@ -139,7 +139,6 @@ func Kill(ctx context.Context, nameOrID string, signal string) error { return response.Process(nil) } -func Logs() {} // Pause pauses a given container. The nameOrID can be a container name // or a partial/full ID. diff --git a/pkg/bindings/containers/logs.go b/pkg/bindings/containers/logs.go new file mode 100644 index 000000000..b7ecb3c7e --- /dev/null +++ b/pkg/bindings/containers/logs.go @@ -0,0 +1,116 @@ +package containers + +import ( + "context" + "encoding/binary" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/containers/libpod/pkg/bindings" + "github.com/pkg/errors" +) + +// Logs obtains a container's logs given the options provided. The logs are then sent to the +// stdout|stderr channels as strings. +func Logs(ctx context.Context, nameOrID string, opts LogOptions, stdoutChan, stderrChan chan string) error { + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params := url.Values{} + if opts.Follow != nil { + params.Set("follow", strconv.FormatBool(*opts.Follow)) + } + if opts.Since != nil { + params.Set("since", *opts.Since) + } + if opts.Stderr != nil { + params.Set("stderr", strconv.FormatBool(*opts.Stderr)) + } + if opts.Stdout != nil { + params.Set("stdout", strconv.FormatBool(*opts.Stdout)) + } + if opts.Tail != nil { + params.Set("tail", *opts.Tail) + } + if opts.Timestamps != nil { + params.Set("timestamps", strconv.FormatBool(*opts.Timestamps)) + } + if opts.Until != nil { + params.Set("until", *opts.Until) + } + // The API requires either stdout|stderr be used. 
If neither are specified, we specify stdout + if opts.Stdout == nil && opts.Stderr == nil { + params.Set("stdout", strconv.FormatBool(true)) + } + response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/logs", params, nameOrID) + if err != nil { + return err + } + + // read 8 bytes + // first byte determines stderr=2|stdout=1 + // bytes 4-7 len(msg) in uint32 + for { + stream, msgSize, err := readHeader(response.Body) + if err != nil { + // In case the server side closes up shop because !follow + if err == io.EOF { + break + } + return errors.Wrap(err, "unable to read log header") + } + msg, err := readMsg(response.Body, msgSize) + if err != nil { + return errors.Wrap(err, "unable to read log message") + } + if stream == 1 { + stdoutChan <- msg + } else { + stderrChan <- msg + } + } + return nil +} + +func readMsg(r io.Reader, msgSize int) (string, error) { + var msg []byte + size := msgSize + for { + b := make([]byte, size) + _, err := r.Read(b) + if err != nil { + return "", err + } + msg = append(msg, b...) + if len(msg) == msgSize { + break + } + size = msgSize - len(msg) + } + return string(msg), nil +} + +func readHeader(r io.Reader) (byte, int, error) { + var ( + header []byte + size = 8 + ) + for { + b := make([]byte, size) + _, err := r.Read(b) + if err != nil { + return 0, 0, err + } + header = append(header, b...) + if len(header) == 8 { + break + } + size = 8 - len(header) + } + stream := header[0] + msgSize := int(binary.BigEndian.Uint32(header[4:]) - 8) + return stream, msgSize, nil +} diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go new file mode 100644 index 000000000..87342f7ea --- /dev/null +++ b/pkg/bindings/containers/types.go @@ -0,0 +1,13 @@ +package containers + +// LogOptions describe finer control of log content or +// how the content is formatted. +type LogOptions struct { + Follow *bool + Since *string + Stderr *bool + Stdout *bool + Tail *string + Timestamps *bool + Until *string +} diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go new file mode 100644 index 000000000..fce8bbb8e --- /dev/null +++ b/pkg/bindings/system/system.go @@ -0,0 +1,61 @@ +package system + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/containers/libpod/pkg/api/handlers" + "github.com/containers/libpod/pkg/bindings" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Events allows you to monitor libdpod related events like container creation and +// removal. The events are then passed to the eventChan provided. The optional cancelChan +// can be used to cancel the read of events and close down the HTTP connection. 
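A minimal sketch of how a caller might drive the Events binding defined just below. The socket URI is only an example, and the connection setup via bindings.NewConnection as well as the handlers.Event field layout are assumptions rather than something this patch shows:

    package example

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/api/handlers"
        "github.com/containers/libpod/pkg/bindings"
        "github.com/containers/libpod/pkg/bindings/system"
    )

    // watchEvents prints events until the server closes the stream or cancel is
    // signalled. Sketch only; the printer goroutine is left running for brevity.
    func watchEvents(cancel chan bool) error {
        connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
        if err != nil {
            return err
        }
        eventChan := make(chan handlers.Event)
        go func() {
            for e := range eventChan {
                fmt.Printf("%+v\n", e)
            }
        }()
        // Events blocks, decoding JSON events onto eventChan, until EOF or until
        // cancel causes the response body to be closed.
        return system.Events(connCtx, eventChan, cancel, nil, nil, nil)
    }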
+func Events(ctx context.Context, eventChan chan (handlers.Event), cancelChan chan bool, since, until *string, filters map[string][]string) error { + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params := url.Values{} + if since != nil { + params.Set("since", *since) + } + if until != nil { + params.Set("until", *until) + } + if filters != nil { + filterString, err := bindings.FiltersToString(filters) + if err != nil { + return errors.Wrap(err, "invalid filters") + } + params.Set("filters", filterString) + } + response, err := conn.DoRequest(nil, http.MethodGet, "/events", params) + if err != nil { + return err + } + if cancelChan != nil { + go func() { + <-cancelChan + err = response.Body.Close() + logrus.Error(errors.Wrap(err, "unable to close event response body")) + }() + } + dec := json.NewDecoder(response.Body) + for { + e := handlers.Event{} + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + break + } + return errors.Wrap(err, "unable to decode event response") + } + eventChan <- e + } + return nil +} diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go index a4d065a14..409e620a3 100644 --- a/pkg/bindings/test/common_test.go +++ b/pkg/bindings/test/common_test.go @@ -152,7 +152,7 @@ func (b *bindingTest) startAPIService() *gexec.Session { var ( cmd []string ) - cmd = append(cmd, "--log-level=debug", "system", "service", "--timeout=999999", b.sock) + cmd = append(cmd, "--log-level=debug", "--events-backend=file", "system", "service", "--timeout=0", b.sock) return b.runPodman(cmd) } diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index 34a9c3136..afb0cc19b 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -3,10 +3,12 @@ package test_bindings import ( "net/http" "strconv" + "strings" "time" "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/bindings/containers" + "github.com/containers/libpod/pkg/specgen" "github.com/containers/libpod/test/utils" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -15,11 +17,9 @@ import ( var _ = Describe("Podman containers ", func() { var ( - bt *bindingTest - s *gexec.Session - err error - falseFlag bool = false - trueFlag bool = true + bt *bindingTest + s *gexec.Session + err error ) BeforeEach(func() { @@ -55,7 +55,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by name", func() { // Pausing by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -69,7 +69,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by id", func() { // Pausing by id should work var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -83,7 +83,7 @@ var _ = Describe("Podman containers ", func() { It("podman unpause a running container by name", func() { // Unpausing by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -99,7 +99,7 @@ var _ = Describe("Podman containers ", func() { It("podman unpause a running container by ID", func() { // Unpausing by ID should work var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) // Pause by name err = containers.Pause(bt.conn, name) @@ -118,7 +118,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a paused container by name", func() { // Pausing a paused container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -131,7 +131,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a paused container by id", func() { // Pausing a paused container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -144,7 +144,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by name", func() { // Pausing a stopped container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -157,7 +157,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by id", func() { // Pausing a stopped container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, cid, nil) Expect(err).To(BeNil()) @@ -170,11 +170,11 @@ var _ = Describe("Podman containers ", func() { It("podman remove a paused container by id without force", func() { // Removing a paused container without force should fail var name = "top" - 
cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(bt.conn, cid, &falseFlag, &falseFlag) + err = containers.Remove(bt.conn, cid, &bindings.PFalse, &bindings.PFalse) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -191,18 +191,18 @@ var _ = Describe("Podman containers ", func() { // Removing a paused container with force should work var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(bt.conn, cid, &trueFlag, &falseFlag) + err = containers.Remove(bt.conn, cid, &bindings.PTrue, &bindings.PFalse) Expect(err).To(BeNil()) }) It("podman stop a paused container by name", func() { // Stopping a paused container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -215,7 +215,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a paused container by id", func() { // Stopping a paused container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -228,7 +228,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by name", func() { // Stopping a running container by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &falseFlag, nil) + _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -242,7 +242,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by ID", func() { // Stopping a running container by ID should work var name = "top" - cid, err := bt.RunTopContainer(&name, &falseFlag, nil) + cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, cid, nil) Expect(err).To(BeNil()) @@ -301,8 +301,8 @@ var _ = Describe("Podman containers ", func() { errChan = make(chan error) go func() { - exitCode, err = containers.Wait(bt.conn, name, &unpause) - errChan <- err + _, waitErr := containers.Wait(bt.conn, name, &unpause) + errChan <- waitErr close(errChan) }() err = containers.Unpause(bt.conn, name) @@ -323,7 +323,7 @@ var _ = Describe("Podman containers ", func() { // a container that has no healthcheck should be a 409 var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) + bt.RunTopContainer(&name, &bindings.PFalse, nil) _, err = containers.RunHealthCheck(bt.conn, name) Expect(err).ToNot(BeNil()) code, _ = bindings.CheckResponseCode(err) @@ -357,4 +357,26 @@ var _ = Describe("Podman containers ", func() { //Expect(code).To(BeNumerically("==", http.StatusConflict)) }) + It("logging", func() { + stdoutChan := make(chan string, 10) + s := specgen.NewSpecGenerator(alpine.name) + s.Terminal = true + s.Command = []string{"date", "-R"} + r, err := containers.CreateWithSpec(bt.conn, s) + Expect(err).To(BeNil()) + 
err = containers.Start(bt.conn, r.ID, nil) + Expect(err).To(BeNil()) + + _, err = containers.Wait(bt.conn, r.ID, nil) + Expect(err).To(BeNil()) + + opts := containers.LogOptions{Stdout: &bindings.PTrue, Follow: &bindings.PTrue} + go func() { + containers.Logs(bt.conn, r.ID, opts, stdoutChan, nil) + }() + o := <-stdoutChan + o = strings.ReplaceAll(o, "\r", "") + _, err = time.Parse(time.RFC1123Z, o) + Expect(err).To(BeNil()) + }) }) diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go index 17b3b254a..5e4cfe7be 100644 --- a/pkg/bindings/test/images_test.go +++ b/pkg/bindings/test/images_test.go @@ -19,11 +19,9 @@ var _ = Describe("Podman images", func() { //tempdir string //err error //podmanTest *PodmanTestIntegration - bt *bindingTest - s *gexec.Session - err error - falseFlag bool = false - trueFlag bool = true + bt *bindingTest + s *gexec.Session + err error ) BeforeEach(func() { @@ -76,7 +74,7 @@ var _ = Describe("Podman images", func() { //Expect(data.Size).To(BeZero()) // Enabling the size parameter should result in size being populated - data, err = images.GetImage(bt.conn, alpine.name, &trueFlag) + data, err = images.GetImage(bt.conn, alpine.name, &bindings.PTrue) Expect(err).To(BeNil()) Expect(data.Size).To(BeNumerically(">", 0)) }) @@ -84,7 +82,7 @@ var _ = Describe("Podman images", func() { // Test to validate the remove image api It("remove image", func() { // Remove invalid image should be a 404 - _, err = images.Remove(bt.conn, "foobar5000", &falseFlag) + _, err = images.Remove(bt.conn, "foobar5000", &bindings.PFalse) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) @@ -101,21 +99,21 @@ var _ = Describe("Podman images", func() { // Start a container with alpine image var top string = "top" - _, err = bt.RunTopContainer(&top, &falseFlag, nil) + _, err = bt.RunTopContainer(&top, &bindings.PFalse, nil) Expect(err).To(BeNil()) // we should now have a container called "top" running - containerResponse, err := containers.Inspect(bt.conn, "top", &falseFlag) + containerResponse, err := containers.Inspect(bt.conn, "top", &bindings.PFalse) Expect(err).To(BeNil()) Expect(containerResponse.Name).To(Equal("top")) // try to remove the image "alpine". This should fail since we are not force // deleting hence image cannot be deleted until the container is deleted. 
- response, err = images.Remove(bt.conn, alpine.shortName, &falseFlag) + response, err = images.Remove(bt.conn, alpine.shortName, &bindings.PFalse) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) // Removing the image "alpine" where force = true - response, err = images.Remove(bt.conn, alpine.shortName, &trueFlag) + response, err = images.Remove(bt.conn, alpine.shortName, &bindings.PTrue) Expect(err).To(BeNil()) // Checking if both the images are gone as well as the container is deleted @@ -127,7 +125,7 @@ var _ = Describe("Podman images", func() { code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) - _, err = containers.Inspect(bt.conn, "top", &falseFlag) + _, err = containers.Inspect(bt.conn, "top", &bindings.PFalse) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) }) @@ -178,13 +176,13 @@ var _ = Describe("Podman images", func() { // List images with a filter filters := make(map[string][]string) filters["reference"] = []string{alpine.name} - filteredImages, err := images.List(bt.conn, &falseFlag, filters) + filteredImages, err := images.List(bt.conn, &bindings.PFalse, filters) Expect(err).To(BeNil()) Expect(len(filteredImages)).To(BeNumerically("==", 1)) // List images with a bad filter filters["name"] = []string{alpine.name} - _, err = images.List(bt.conn, &falseFlag, filters) + _, err = images.List(bt.conn, &bindings.PFalse, filters) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index 7e29265b7..fae1deca4 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -14,11 +14,10 @@ import ( var _ = Describe("Podman pods", func() { var ( - bt *bindingTest - s *gexec.Session - newpod string - err error - trueFlag bool = true + bt *bindingTest + s *gexec.Session + newpod string + err error ) BeforeEach(func() { @@ -57,7 +56,7 @@ var _ = Describe("Podman pods", func() { Expect(err).To(BeNil()) Expect(len(podSummary)).To(Equal(1)) // Adding an alpine container to the existing pod - _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) + _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod) Expect(err).To(BeNil()) podSummary, err = pods.List(bt.conn, nil) // Verify no errors. @@ -76,15 +75,66 @@ var _ = Describe("Podman pods", func() { } Expect(StringInSlice(newpod, names)).To(BeTrue()) Expect(StringInSlice("newpod2", names)).To(BeTrue()) + }) + + // The test validates the list pod endpoint with passing filters as the params. 
+ It("List pods with filters", func() { + var newpod2 string = "newpod2" + bt.Podcreate(&newpod2) + _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) + Expect(err).To(BeNil()) - // TODO not working Because: code to list based on filter - // "not yet implemented", - // Validate list pod with filters - //filters := make(map[string][]string) - //filters["name"] = []string{newpod} - //filteredPods, err := pods.List(bt.conn, filters) - //Expect(err).To(BeNil()) - //Expect(len(filteredPods)).To(BeNumerically("==", 1)) + // Expected err with invalid filter params + filters := make(map[string][]string) + filters["dummy"] = []string{"dummy"} + filteredPods, err := pods.List(bt.conn, filters) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + + // Expected empty response with invalid filters + filters = make(map[string][]string) + filters["name"] = []string{"dummy"} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 0)) + + // Validate list pod with name filter + filters = make(map[string][]string) + filters["name"] = []string{newpod2} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + var names []string + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod2", names)).To(BeTrue()) + + // Validate list pod with id filter + filters = make(map[string][]string) + response, err := pods.Inspect(bt.conn, newpod) + id := response.Config.ID + filters["id"] = []string{id} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + names = names[:0] + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod", names)).To(BeTrue()) + + // Using multiple filters + filters["name"] = []string{newpod} + filteredPods, err = pods.List(bt.conn, filters) + Expect(err).To(BeNil()) + Expect(len(filteredPods)).To(BeNumerically("==", 1)) + names = names[:0] + for _, i := range filteredPods { + names = append(names, i.Config.Name) + } + Expect(StringInSlice("newpod", names)).To(BeTrue()) }) // The test validates if the exists responds @@ -111,7 +161,7 @@ var _ = Describe("Podman pods", func() { Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Adding an alpine container to the existing pod - _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) + _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod) Expect(err).To(BeNil()) // Binding needs to be modified to inspect the pod state. diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go new file mode 100644 index 000000000..3abc26b34 --- /dev/null +++ b/pkg/bindings/test/system_test.go @@ -0,0 +1,51 @@ +package test_bindings + +import ( + "time" + + "github.com/containers/libpod/pkg/api/handlers" + "github.com/containers/libpod/pkg/bindings/system" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var _ = Describe("Podman system", func() { + var ( + bt *bindingTest + s *gexec.Session + ) + + BeforeEach(func() { + bt = newBindingTest() + bt.RestoreImagesFromCache() + s = bt.startAPIService() + time.Sleep(1 * time.Second) + err := bt.NewConnection() + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + s.Kill() + bt.cleanup() + }) + + It("podman events", func() { + eChan := make(chan handlers.Event, 1) + var messages []handlers.Event + cancelChan := make(chan bool, 1) + go func() { + for e := range eChan { + messages = append(messages, e) + } + }() + go func() { + system.Events(bt.conn, eChan, cancelChan, nil, nil, nil) + }() + + _, err := bt.RunTopContainer(nil, nil, nil) + Expect(err).To(BeNil()) + cancelChan <- true + Expect(len(messages)).To(BeNumerically("==", 3)) + }) +}) diff --git a/pkg/bindings/test/volumes_test.go b/pkg/bindings/test/volumes_test.go index c8940d46e..1d5ae1329 100644 --- a/pkg/bindings/test/volumes_test.go +++ b/pkg/bindings/test/volumes_test.go @@ -3,11 +3,12 @@ package test_bindings import ( "context" "fmt" + "net/http" + "time" + "github.com/containers/libpod/pkg/api/handlers" "github.com/containers/libpod/pkg/bindings/containers" "github.com/containers/libpod/pkg/bindings/volumes" - "net/http" - "time" "github.com/containers/libpod/pkg/bindings" . "github.com/onsi/ginkgo" @@ -24,7 +25,6 @@ var _ = Describe("Podman volumes", func() { s *gexec.Session connText context.Context err error - trueFlag = true ) BeforeEach(func() { @@ -106,7 +106,7 @@ var _ = Describe("Podman volumes", func() { zero := 0 err = containers.Stop(connText, "vtest", &zero) Expect(err).To(BeNil()) - err = volumes.Remove(connText, vol.Name, &trueFlag) + err = volumes.Remove(connText, vol.Name, &bindings.PTrue) Expect(err).To(BeNil()) }) diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go new file mode 100644 index 000000000..1899b98f7 --- /dev/null +++ b/pkg/domain/entities/containers.go @@ -0,0 +1 @@ +package entities diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go new file mode 100644 index 000000000..a1096f1f1 --- /dev/null +++ b/pkg/domain/entities/engine.go @@ -0,0 +1,97 @@ +package entities + +import ( + "net/url" + "os/user" + "path/filepath" + + "github.com/containers/libpod/libpod/define" + "github.com/spf13/pflag" +) + +type EngineMode string + +const ( + ABIMode = EngineMode("abi") + TunnelMode = EngineMode("tunnel") +) + +func (m EngineMode) String() string { + return string(m) +} + +type EngineOptions struct { + Uri *url.URL + Identities []string + FlagSet pflag.FlagSet + Flags EngineFlags +} + +type EngineFlags struct { + CGroupManager string + CniConfigDir string + ConmonPath string + DefaultMountsFile string + EventsBackend string + HooksDir []string + MaxWorks int + Namespace string + Root string + Runroot string + Runtime string + StorageDriver string + StorageOpts []string + Syslog bool + Trace bool + NetworkCmdPath string + + Config string + CpuProfile string + LogLevel string + TmpDir string + + RemoteUserName string + RemoteHost string + VarlinkAddress string + ConnectionName string + RemoteConfigFilePath string + Port int + IdentityFile string + IgnoreHosts bool + + EngineMode EngineMode +} + +func NewEngineOptions() (EngineFlags, error) { + u, _ := user.Current() + return EngineFlags{ + CGroupManager: define.SystemdCgroupsManager, + CniConfigDir: "", + Config: "", + ConmonPath: filepath.Join("usr", "bin", "conmon"), + ConnectionName: "", + 
CpuProfile: "", + DefaultMountsFile: "", + EventsBackend: "", + HooksDir: nil, + IdentityFile: "", + IgnoreHosts: false, + LogLevel: "", + MaxWorks: 0, + Namespace: "", + NetworkCmdPath: "", + Port: 0, + RemoteConfigFilePath: "", + RemoteHost: "", + RemoteUserName: "", + Root: "", + Runroot: filepath.Join("run", "user", u.Uid), + Runtime: "", + StorageDriver: "overlayfs", + StorageOpts: nil, + Syslog: false, + TmpDir: filepath.Join("run", "user", u.Uid, "libpod", "tmp"), + Trace: false, + VarlinkAddress: "", + }, nil +} diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go new file mode 100644 index 000000000..d08f37d44 --- /dev/null +++ b/pkg/domain/entities/engine_container.go @@ -0,0 +1,26 @@ +package entities + +import ( + "context" +) + +type ContainerEngine interface { + ContainerRuntime + PodRuntime + VolumeRuntime +} + +type ContainerRuntime interface { + ContainerDelete(ctx context.Context, opts ContainerDeleteOptions) (*ContainerDeleteReport, error) + ContainerPrune(ctx context.Context) (*ContainerPruneReport, error) +} + +type PodRuntime interface { + PodDelete(ctx context.Context, opts PodPruneOptions) (*PodDeleteReport, error) + PodPrune(ctx context.Context) (*PodPruneReport, error) +} + +type VolumeRuntime interface { + VolumeDelete(ctx context.Context, opts VolumeDeleteOptions) (*VolumeDeleteReport, error) + VolumePrune(ctx context.Context) (*VolumePruneReport, error) +} diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go new file mode 100644 index 000000000..27676d781 --- /dev/null +++ b/pkg/domain/entities/engine_image.go @@ -0,0 +1,12 @@ +package entities + +import ( + "context" +) + +type ImageEngine interface { + Delete(ctx context.Context, nameOrId string, opts ImageDeleteOptions) (*ImageDeleteReport, error) + History(ctx context.Context, nameOrId string, opts ImageHistoryOptions) (*ImageHistoryReport, error) + List(ctx context.Context, opts ImageListOptions) (*ImageListReport, error) + Prune(ctx context.Context, opts ImagePruneOptions) (*ImagePruneReport, error) +} diff --git a/pkg/domain/entities/filters.go b/pkg/domain/entities/filters.go new file mode 100644 index 000000000..c7e227244 --- /dev/null +++ b/pkg/domain/entities/filters.go @@ -0,0 +1,150 @@ +package entities + +import ( + "net/url" + "strings" +) + +// Identifier interface allows filters to access ID() of object +type Identifier interface { + Id() string +} + +// Named interface allows filters to access Name() of object +type Named interface { + Name() string +} + +// Named interface allows filters to access Name() of object +type Names interface { + Names() []string +} + +// IdOrName interface allows filters to access ID() or Name() of object +type IdOrNamed interface { + Identifier + Named +} + +// IdOrName interface allows filters to access ID() or Names() of object +type IdOrNames interface { + Identifier + Names +} + +type ImageFilter func(Image) bool +type VolumeFilter func(Volume) bool +type ContainerFilter func(Container) bool + +func CompileImageFilters(filters url.Values) ImageFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIdFn(targets)) + case "name": + fns = append(fns, FilterNamesFn(targets)) + case "idOrName": + fns = append(fns, FilterIdOrNameFn(targets)) + } + } + + return func(image Image) bool { + for _, fn := range fns { + if !fn.(ImageFilter)(image) { + return false + } + } + return true + } +} + +func 
CompileContainerFilters(filters url.Values) ContainerFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIdFn(targets)) + case "name": + fns = append(fns, FilterNameFn(targets)) + case "idOrName": + fns = append(fns, FilterIdOrNameFn(targets)) + } + } + + return func(ctnr Container) bool { + for _, fn := range fns { + if !fn.(ContainerFilter)(ctnr) { + return false + } + } + return true + } +} + +func CompileVolumeFilters(filters url.Values) VolumeFilter { + var fns []interface{} + + for name, targets := range filters { + if name == "id" { + fns = append(fns, FilterIdFn(targets)) + } + } + + return func(volume Volume) bool { + for _, fn := range fns { + if !fn.(VolumeFilter)(volume) { + return false + } + } + return true + } +} + +func FilterIdFn(id []string) func(Identifier) bool { + return func(obj Identifier) bool { + for _, v := range id { + if strings.Contains(obj.Id(), v) { + return true + } + } + return false + } +} + +func FilterNameFn(name []string) func(Named) bool { + return func(obj Named) bool { + for _, v := range name { + if strings.Contains(obj.Name(), v) { + return true + } + } + return false + } +} + +func FilterNamesFn(name []string) func(Names) bool { + return func(obj Names) bool { + for _, v := range name { + for _, n := range obj.Names() { + if strings.Contains(n, v) { + return true + } + } + } + return false + } +} + +func FilterIdOrNameFn(id []string) func(IdOrNamed) bool { + return func(obj IdOrNamed) bool { + for _, v := range id { + if strings.Contains(obj.Id(), v) || strings.Contains(obj.Name(), v) { + return true + } + } + return false + } +} diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go new file mode 100644 index 000000000..c84ed5351 --- /dev/null +++ b/pkg/domain/entities/images.go @@ -0,0 +1,151 @@ +package entities + +import ( + "net/url" + + "github.com/containers/image/v5/manifest" + docker "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Image struct { + IdOrNamed + ID string `json:"Id"` + RepoTags []string `json:",omitempty"` + RepoDigests []string `json:",omitempty"` + Parent string `json:",omitempty"` + Comment string `json:",omitempty"` + Created string `json:",omitempty"` + Container string `json:",omitempty"` + ContainerConfig *container.Config `json:",omitempty"` + DockerVersion string `json:",omitempty"` + Author string `json:",omitempty"` + Config *container.Config `json:",omitempty"` + Architecture string `json:",omitempty"` + Variant string `json:",omitempty"` + Os string `json:",omitempty"` + OsVersion string `json:",omitempty"` + Size int64 `json:",omitempty"` + VirtualSize int64 `json:",omitempty"` + GraphDriver docker.GraphDriverData `json:",omitempty"` + RootFS docker.RootFS `json:",omitempty"` + Metadata docker.ImageMetadata `json:",omitempty"` + + // Podman extensions + Digest digest.Digest `json:",omitempty"` + PodmanVersion string `json:",omitempty"` + ManifestType string `json:",omitempty"` + User string `json:",omitempty"` + History []v1.History `json:",omitempty"` + NamesHistory []string `json:",omitempty"` + HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"` +} + +func (i *Image) Id() string { + return i.ID +} + +type ImageSummary struct { + Identifier + ID string `json:"Id"` + ParentId string `json:",omitempty"` + RepoTags []string `json:",omitempty"` + Created int 
`json:",omitempty"` + Size int `json:",omitempty"` + SharedSize int `json:",omitempty"` + VirtualSize int `json:",omitempty"` + Labels string `json:",omitempty"` + Containers int `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Dangling bool `json:",omitempty"` + + // Podman extensions + Digest digest.Digest `json:",omitempty"` + ConfigDigest digest.Digest `json:",omitempty"` +} + +func (i *ImageSummary) Id() string { + return i.ID +} + +func (i *ImageSummary) IsReadOnly() bool { + return i.ReadOnly +} + +func (i *ImageSummary) IsDangling() bool { + return i.Dangling +} + +type ImageOptions struct { + All bool + Digests bool + Filter []string + Format string + Noheading bool + NoTrunc bool + Quiet bool + Sort string + History bool +} + +type ImageDeleteOptions struct { + Force bool +} + +// ImageDeleteResponse is the response for removing an image from storage and containers +// what was untagged vs actually removed +type ImageDeleteReport struct { + Untagged []string `json:"untagged"` + Deleted string `json:"deleted"` +} + +type ImageHistoryOptions struct{} + +type ImageHistoryLayer struct { + ID string `json:"Id"` + Created int64 `json:"Created,omitempty"` + CreatedBy string `json:",omitempty"` + Tags []string `json:",omitempty"` + Size int64 `json:",omitempty"` + Comment string `json:",omitempty"` +} + +type ImageHistoryReport struct { + Layers []ImageHistoryLayer +} + +type ImageInspectOptions struct { + TypeObject string `json:",omitempty"` + Format string `json:",omitempty"` + Size bool `json:",omitempty"` + Latest bool `json:",omitempty"` +} + +type ImageListOptions struct { + All bool `json:"all" schema:"all"` + Digests bool `json:"digests" schema:"digests"` + Filter []string `json:",omitempty"` + Filters url.Values `json:"filters" schema:"filters"` + Format string `json:",omitempty"` + History bool `json:",omitempty"` + Noheading bool `json:",omitempty"` + NoTrunc bool `json:",omitempty"` + Quiet bool `json:",omitempty"` + Sort string `json:",omitempty"` +} + +type ImageListReport struct { + Images []ImageSummary +} + +type ImagePruneOptions struct { + All bool + Filter ImageFilter +} + +type ImagePruneReport struct { + Report Report + Size int64 +} diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go new file mode 100644 index 000000000..6f947dc4d --- /dev/null +++ b/pkg/domain/entities/types.go @@ -0,0 +1,25 @@ +package entities + +type Container struct { + IdOrNamed +} + +type Volume struct { + Identifier +} + +type Report struct { + Id []string + Err map[string]error +} + +type ContainerDeleteOptions struct{} +type ContainerDeleteReport struct{ Report } +type ContainerPruneReport struct{ Report } + +type PodDeleteReport struct{ Report } +type PodPruneOptions struct{} +type PodPruneReport struct{ Report } +type VolumeDeleteOptions struct{} +type VolumeDeleteReport struct{ Report } +type VolumePruneReport struct{ Report } diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go new file mode 100644 index 000000000..2db08f259 --- /dev/null +++ b/pkg/domain/infra/abi/images.go @@ -0,0 +1,131 @@ +// +build ABISupport + +package abi + +import ( + "context" + + libpodImage "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/utils" +) + +func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) { + image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId) + if err != 
nil { + return nil, err + } + + results, err := ir.Libpod.RemoveImage(ctx, image, opts.Force) + if err != nil { + return nil, err + } + + report := entities.ImageDeleteReport{} + if err := utils.DeepCopy(&report, results); err != nil { + return nil, err + } + return &report, nil +} + +func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) { + results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, []string{}) + if err != nil { + return nil, err + } + + report := entities.ImagePruneReport{} + copy(report.Report.Id, results) + return &report, nil +} + +func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) { + var ( + images []*libpodImage.Image + err error + ) + + filters := utils.ToLibpodFilters(opts.Filters) + if len(filters) > 0 { + images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(filters) + } else { + images, err = ir.Libpod.ImageRuntime().GetImages() + } + if err != nil { + return nil, err + } + + report := entities.ImageListReport{ + Images: make([]entities.ImageSummary, len(images)), + } + for i, img := range images { + hold := entities.ImageSummary{} + if err := utils.DeepCopy(&hold, img); err != nil { + return nil, err + } + report.Images[i] = hold + } + return &report, nil +} + +func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) { + image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId) + if err != nil { + return nil, err + } + results, err := image.History(ctx) + if err != nil { + return nil, err + } + + history := entities.ImageHistoryReport{ + Layers: make([]entities.ImageHistoryLayer, len(results)), + } + + for i, layer := range results { + history.Layers[i] = ToDomainHistoryLayer(layer) + } + return &history, nil +} + +func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer { + l := entities.ImageHistoryLayer{} + l.ID = layer.ID + l.Created = layer.Created.Unix() + l.CreatedBy = layer.CreatedBy + copy(l.Tags, layer.Tags) + l.Size = layer.Size + l.Comment = layer.Comment + return l +} + +// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) { +// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId) +// if err != nil { +// return nil, err +// } +// +// results, err := r.libpod.RemoveImage(ctx, image, opts.Force) +// if err != nil { +// return nil, err +// } +// +// report := entities.ImageDeleteReport{} +// if err := utils.DeepCopy(&report, results); err != nil { +// return nil, err +// } +// return &report, nil +// } +// +// func (r *imageRuntime) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) { +// // TODO: map FilterOptions +// id, err := r.libpod.ImageEngine().PruneImages(ctx, opts.All, []string{}) +// if err != nil { +// return nil, err +// } +// +// // TODO: Determine Size +// report := entities.ImagePruneReport{} +// copy(report.Report.Id, id) +// return &report, nil +// } diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go new file mode 100644 index 000000000..20ef1b150 --- /dev/null +++ b/pkg/domain/infra/abi/images_test.go @@ -0,0 +1,37 @@ +package abi + +// +// import ( +// "context" +// "testing" +// +// "github.com/stretchr/testify/mock" +// ) +// +// type MockImageRuntime struct { +// mock.Mock +// } +// +// func (m 
*MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error { +// _ = m.Called(ctx, renderer, name) +// return nil +// } +// +// func TestImageSuccess(t *testing.T) { +// actual := func() interface{} { return nil } +// +// m := new(MockImageRuntime) +// m.On( +// "Delete", +// mock.AnythingOfType("*context.emptyCtx"), +// mock.AnythingOfType("func() interface {}"), +// "fedora"). +// Return(nil) +// +// r := DirectImageRuntime{m} +// err := r.Delete(context.TODO(), actual, "fedora") +// if err != nil { +// t.Errorf("error should be nil, got: %v", err) +// } +// m.AssertExpectations(t) +// } diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go new file mode 100644 index 000000000..479a69586 --- /dev/null +++ b/pkg/domain/infra/abi/runtime.go @@ -0,0 +1,19 @@ +// +build ABISupport + +package abi + +import ( + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/domain/entities" +) + +// Image-related runtime linked against libpod library +type ImageEngine struct { + Libpod *libpod.Runtime +} + +// Container-related runtime linked against libpod library +type ContainerEngine struct { + entities.ContainerEngine + Libpod *libpod.Runtime +} diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go new file mode 100644 index 000000000..de996f567 --- /dev/null +++ b/pkg/domain/infra/runtime_abi.go @@ -0,0 +1,39 @@ +// +build ABISupport + +package infra + +import ( + "context" + "fmt" + + "github.com/containers/libpod/pkg/bindings" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra/abi" + "github.com/containers/libpod/pkg/domain/infra/tunnel" +) + +// NewContainerEngine factory provides a libpod runtime for container-related operations +func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) { + switch mode { + case entities.ABIMode: + r, err := NewLibpodRuntime(opts.FlagSet, opts.Flags) + return &abi.ContainerEngine{ContainerEngine: r}, err + case entities.TunnelMode: + ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...) + return &tunnel.ContainerEngine{ClientCxt: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", mode) +} + +// NewContainerEngine factory provides a libpod runtime for image-related operations +func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) { + switch mode { + case entities.ABIMode: + r, err := NewLibpodImageRuntime(opts.FlagSet, opts.Flags) + return r, err + case entities.TunnelMode: + ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...) 
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", mode) +} diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go new file mode 100644 index 000000000..480e7c8d7 --- /dev/null +++ b/pkg/domain/infra/runtime_image_proxy.go @@ -0,0 +1,25 @@ +// +build ABISupport + +package infra + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra/abi" + "github.com/spf13/pflag" +) + +// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo + +func NewLibpodImageRuntime(flags pflag.FlagSet, opts entities.EngineFlags) (entities.ImageEngine, error) { + r, err := GetRuntime(context.Background(), flags, opts) + if err != nil { + return nil, err + } + return &abi.ImageEngine{Libpod: r}, nil +} + +func (ir *runtime) ShutdownImageRuntime(force bool) error { + return ir.Libpod.Shutdown(force) +} diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go new file mode 100644 index 000000000..b42c52b6e --- /dev/null +++ b/pkg/domain/infra/runtime_libpod.go @@ -0,0 +1,331 @@ +package infra + +import ( + "context" + "fmt" + "os" + + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/cgroups" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/namespaces" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + flag "github.com/spf13/pflag" +) + +type engineOpts struct { + name string + renumber bool + migrate bool + noStore bool + withFDS bool + flags entities.EngineFlags +} + +// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers +func GetRuntimeMigrate(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags, newRuntime string) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + name: newRuntime, + renumber: false, + migrate: true, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify +func GetRuntimeDisableFDs(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: false, + migrate: false, + noStore: false, + withFDS: false, + flags: ef, + }) +} + +// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber +func GetRuntimeRenumber(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: true, + migrate: false, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntime generates a new libpod runtime configured by command line options +func GetRuntime(ctx context.Context, flags flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, flags, &engineOpts{ + renumber: false, + migrate: false, + noStore: false, + withFDS: true, + flags: ef, + }) +} + +// GetRuntimeNoStore generates a new libpod runtime configured by command line options +func GetRuntimeNoStore(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) { + return getRuntime(ctx, fs, &engineOpts{ + renumber: false, + migrate: false, + noStore: true, + withFDS: true, + flags: ef, + }) +} + +func getRuntime(ctx context.Context, fs flag.FlagSet, opts 
*engineOpts) (*libpod.Runtime, error) { + options := []libpod.RuntimeOption{} + storageOpts := storage.StoreOptions{} + storageSet := false + + uidmapFlag := fs.Lookup("uidmap") + gidmapFlag := fs.Lookup("gidmap") + subuidname := fs.Lookup("subuidname") + subgidname := fs.Lookup("subgidname") + if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) && + (uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) { + userns, _ := fs.GetString("userns") + uidmapVal, _ := fs.GetStringSlice("uidmap") + gidmapVal, _ := fs.GetStringSlice("gidmap") + subuidVal, _ := fs.GetString("subuidname") + subgidVal, _ := fs.GetString("subgidname") + mappings, err := ParseIDMapping(namespaces.UsernsMode(userns), uidmapVal, gidmapVal, subuidVal, subgidVal) + if err != nil { + return nil, err + } + storageOpts.UIDMap = mappings.UIDMap + storageOpts.GIDMap = mappings.GIDMap + + storageSet = true + } + + if fs.Changed("root") { + storageSet = true + storageOpts.GraphRoot = opts.flags.Root + } + if fs.Changed("runroot") { + storageSet = true + storageOpts.RunRoot = opts.flags.Runroot + } + if len(storageOpts.RunRoot) > 50 { + return nil, errors.New("the specified runroot is longer than 50 characters") + } + if fs.Changed("storage-driver") { + storageSet = true + storageOpts.GraphDriverName = opts.flags.StorageDriver + // Overriding the default storage driver caused GraphDriverOptions from storage.conf to be ignored + storageOpts.GraphDriverOptions = []string{} + } + // This should always be checked after storage-driver is checked + if len(opts.flags.StorageOpts) > 0 { + storageSet = true + storageOpts.GraphDriverOptions = opts.flags.StorageOpts + } + if opts.migrate { + options = append(options, libpod.WithMigrate()) + if opts.name != "" { + options = append(options, libpod.WithMigrateRuntime(opts.name)) + } + } + + if opts.renumber { + options = append(options, libpod.WithRenumber()) + } + + // Only set this if the user changes storage config on the command line + if storageSet { + options = append(options, libpod.WithStorageConfig(storageOpts)) + } + + if !storageSet && opts.noStore { + options = append(options, libpod.WithNoStore()) + } + // TODO CLI flags for image config? + // TODO CLI flag for signature policy? + + if len(opts.flags.Namespace) > 0 { + options = append(options, libpod.WithNamespace(opts.flags.Namespace)) + } + + if fs.Changed("runtime") { + options = append(options, libpod.WithOCIRuntime(opts.flags.Runtime)) + } + + if fs.Changed("conmon") { + options = append(options, libpod.WithConmonPath(opts.flags.ConmonPath)) + } + if fs.Changed("tmpdir") { + options = append(options, libpod.WithTmpDir(opts.flags.TmpDir)) + } + if fs.Changed("network-cmd-path") { + options = append(options, libpod.WithNetworkCmdPath(opts.flags.NetworkCmdPath)) + } + + if fs.Changed("events-backend") { + options = append(options, libpod.WithEventsLogger(opts.flags.EventsBackend)) + } + + if fs.Changed("cgroup-manager") { + options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager)) + } else { + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + if rootless.IsRootless() && !unified { + options = append(options, libpod.WithCgroupManager("cgroupfs")) + } + } + + // TODO flag to set libpod static dir? + // TODO flag to set libpod tmp dir? 
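// Aside: a minimal sketch, not part of this change, of how a caller is expected
// to drive GetRuntime above once flags are parsed. The "podman" flag-set name
// and the single --runtime flag are illustrative assumptions; the other
// identifiers (flag, entities, libpod, context) are the imports of this file.

func exampleGetRuntime() (*libpod.Runtime, error) {
	fs := flag.NewFlagSet("podman", flag.ContinueOnError)
	fs.String("runtime", "", "path to the OCI runtime binary")

	// Engine defaults as assembled by entities.NewEngineOptions.
	ef, err := entities.NewEngineOptions()
	if err != nil {
		return nil, err
	}
	return GetRuntime(context.Background(), *fs, ef)
}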
+ + if fs.Changed("cni-config-dir") { + options = append(options, libpod.WithCNIConfigDir(opts.flags.CniConfigDir)) + } + if fs.Changed("default-mounts-file") { + options = append(options, libpod.WithDefaultMountsFile(opts.flags.DefaultMountsFile)) + } + if fs.Changed("hooks-dir") { + options = append(options, libpod.WithHooksDir(opts.flags.HooksDir...)) + } + + // TODO flag to set CNI plugins dir? + + // TODO I don't think these belong here? + // Will follow up with a different PR to address + // + // Pod create options + + infraImageFlag := fs.Lookup("infra-image") + if infraImageFlag != nil && infraImageFlag.Changed { + infraImage, _ := fs.GetString("infra-image") + options = append(options, libpod.WithDefaultInfraImage(infraImage)) + } + + infraCommandFlag := fs.Lookup("infra-command") + if infraCommandFlag != nil && infraImageFlag.Changed { + infraCommand, _ := fs.GetString("infra-command") + options = append(options, libpod.WithDefaultInfraCommand(infraCommand)) + } + + if !opts.withFDS { + options = append(options, libpod.WithEnableSDNotify()) + } + if fs.Changed("config") { + return libpod.NewRuntimeFromConfig(ctx, opts.flags.Config, options...) + } + return libpod.NewRuntime(ctx, options...) +} + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) { + options := storage.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + + if mode.IsKeepID() { + if len(uidMapSlice) > 0 || len(gidMapSlice) > 0 { + return nil, errors.New("cannot specify custom mappings with --userns=keep-id") + } + if len(subUIDMap) > 0 || len(subGIDMap) > 0 { + return nil, errors.New("cannot specify subuidmap or subgidmap with --userns=keep-id") + } + if rootless.IsRootless() { + min := func(a, b int) int { + if a < b { + return a + } + return b + } + + uid := rootless.GetRootlessUID() + gid := rootless.GetRootlessGID() + + uids, gids, err := rootless.GetConfiguredMappings() + if err != nil { + return nil, errors.Wrapf(err, "cannot read mappings") + } + maxUID, maxGID := 0, 0 + for _, u := range uids { + maxUID += u.Size + } + for _, g := range gids { + maxGID += g.Size + } + + options.UIDMap, options.GIDMap = nil, nil + + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) + if maxUID > uid { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) + } + + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) + if maxGID > gid { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) + } + + options.HostUIDMapping = false + options.HostGIDMapping = false + } + // Simply ignore the setting and do not setup an inner namespace for root as it is a no-op + return &options, nil + } + + if subGIDMap == "" && subUIDMap != "" { + subGIDMap = subUIDMap + } + if subUIDMap == "" && subGIDMap != "" { + subUIDMap = subGIDMap + } + if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 { + gidMapSlice = uidMapSlice + } + if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 { + uidMapSlice = gidMapSlice + } + if 
len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 { + uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())} + } + if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 { + gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())} + } + + if subUIDMap != "" && subGIDMap != "" { + mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) + if err != nil { + return nil, err + } + options.UIDMap = mappings.UIDs() + options.GIDMap = mappings.GIDs() + } + parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID") + if err != nil { + return nil, err + } + parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID") + if err != nil { + return nil, err + } + options.UIDMap = append(options.UIDMap, parsedUIDMap...) + options.GIDMap = append(options.GIDMap, parsedGIDMap...) + if len(options.UIDMap) > 0 { + options.HostUIDMapping = false + } + if len(options.GIDMap) > 0 { + options.HostGIDMapping = false + } + return &options, nil +} diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go new file mode 100644 index 000000000..d17b8efa1 --- /dev/null +++ b/pkg/domain/infra/runtime_proxy.go @@ -0,0 +1,54 @@ +// +build ABISupport + +package infra + +import ( + "context" + + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/domain/entities" + flag "github.com/spf13/pflag" +) + +// ContainerEngine Proxy will be EOL'ed after podmanV2 is separated from libpod repo + +type runtime struct { + entities.ContainerEngine + Libpod *libpod.Runtime +} + +func NewLibpodRuntime(flags flag.FlagSet, opts entities.EngineFlags) (entities.ContainerEngine, error) { + r, err := GetRuntime(context.Background(), flags, opts) + if err != nil { + return nil, err + } + return &runtime{Libpod: r}, nil +} + +func (r *runtime) ShutdownRuntime(force bool) error { + return r.Libpod.Shutdown(force) +} + +func (r *runtime) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) { + panic("implement me") +} + +func (r *runtime) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) { + panic("implement me") +} + +func (r *runtime) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) { + panic("implement me") +} + +func (r *runtime) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) { + panic("implement me") +} + +func (r *runtime) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) { + panic("implement me") +} + +func (r *runtime) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) { + panic("implement me") +} diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go new file mode 100644 index 000000000..8a606deaf --- /dev/null +++ b/pkg/domain/infra/runtime_tunnel.go @@ -0,0 +1,35 @@ +// +build !ABISupport + +package infra + +import ( + "context" + "fmt" + + "github.com/containers/libpod/pkg/bindings" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra/tunnel" +) + +func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) { + switch mode { + case entities.ABIMode: + return nil, fmt.Errorf("direct runtime not supported") + case entities.TunnelMode: + ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...) 
+ return &tunnel.ContainerEngine{ClientCxt: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", mode) +} + +// NewImageEngine factory provides a libpod runtime for image-related operations +func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) { + switch mode { + case entities.ABIMode: + return nil, fmt.Errorf("direct image runtime not supported") + case entities.TunnelMode: + ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...) + return &tunnel.ImageEngine{ClientCxt: ctx}, err + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", mode) +} diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go new file mode 100644 index 000000000..718685e57 --- /dev/null +++ b/pkg/domain/infra/tunnel/images.go @@ -0,0 +1,81 @@ +package tunnel + +import ( + "context" + "net/url" + + images "github.com/containers/libpod/pkg/bindings/images" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/utils" +) + +func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) { + results, err := images.Remove(ir.ClientCxt, nameOrId, &opts.Force) + if err != nil { + return nil, err + } + + report := entities.ImageDeleteReport{ + Untagged: nil, + Deleted: "", + } + + for _, e := range results { + if a, ok := e["Deleted"]; ok { + report.Deleted = a + } + + if a, ok := e["Untagged"]; ok { + report.Untagged = append(report.Untagged, a) + } + } + return &report, err +} + +func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) { + images, err := images.List(ir.ClientCxt, &opts.All, opts.Filters) + if err != nil { + return nil, err + } + + report := entities.ImageListReport{ + Images: make([]entities.ImageSummary, len(images)), + } + for i, img := range images { + hold := entities.ImageSummary{} + if err := utils.DeepCopy(&hold, img); err != nil { + return nil, err + } + report.Images[i] = hold + } + return &report, nil +} + +func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) { + results, err := images.History(ir.ClientCxt, nameOrId) + if err != nil { + return nil, err + } + + history := entities.ImageHistoryReport{ + Layers: make([]entities.ImageHistoryLayer, len(results)), + } + + for i, layer := range results { + hold := entities.ImageHistoryLayer{} + _ = utils.DeepCopy(&hold, layer) + history.Layers[i] = hold + } + return &history, nil +} + +func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) { + results, err := images.Prune(ir.ClientCxt, url.Values{}) + if err != nil { + return nil, err + } + + report := entities.ImagePruneReport{} + copy(report.Report.Id, results) + return &report, nil +} diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go new file mode 100644 index 000000000..af433a6d9 --- /dev/null +++ b/pkg/domain/infra/tunnel/runtime.go @@ -0,0 +1,45 @@ +package tunnel + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" +) + +// Image-related runtime using an ssh-tunnel to utilize Podman service +type ImageEngine struct { + ClientCxt context.Context +} + +// Container-related runtime using an ssh-tunnel to utilize Podman service +type ContainerEngine struct { + 
ClientCxt context.Context +} + +func (r *ContainerEngine) Shutdown(force bool) error { + return nil +} + +func (r *ContainerEngine) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) { + panic("implement me") +} + +func (r *ContainerEngine) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) { + panic("implement me") +} + +func (r *ContainerEngine) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) { + panic("implement me") +} + +func (r *ContainerEngine) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) { + panic("implement me") +} + +func (r *ContainerEngine) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) { + panic("implement me") +} + +func (r *ContainerEngine) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) { + panic("implement me") +} diff --git a/pkg/domain/utils/utils.go b/pkg/domain/utils/utils.go new file mode 100644 index 000000000..c17769f62 --- /dev/null +++ b/pkg/domain/utils/utils.go @@ -0,0 +1,41 @@ +package utils + +import ( + "net/url" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// DeepCopy does a deep copy of a structure +// Error checking of parameters delegated to json engine +var DeepCopy = func(dst interface{}, src interface{}) error { + payload, err := json.Marshal(src) + if err != nil { + return err + } + + err = json.Unmarshal(payload, dst) + if err != nil { + return err + } + return nil +} + +func ToLibpodFilters(f url.Values) (filters []string) { + for k, v := range f { + filters = append(filters, k+"="+v[0]) + } + return +} + +func ToUrlValues(f []string) (filters url.Values) { + filters = make(url.Values) + for _, v := range f { + t := strings.SplitN(v, "=", 2) + filters.Add(t[0], t[1]) + } + return +} diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c index db898e706..6643bfbbf 100644 --- a/pkg/rootless/rootless_linux.c +++ b/pkg/rootless/rootless_linux.c @@ -108,10 +108,9 @@ do_pause () } static char ** -get_cmd_line_args (pid_t pid) +get_cmd_line_args () { int fd; - char path[PATH_MAX]; char *buffer; size_t allocated; size_t used = 0; @@ -119,11 +118,7 @@ get_cmd_line_args (pid_t pid) int i, argc = 0; char **argv; - if (pid) - sprintf (path, "/proc/%d/cmdline", pid); - else - strcpy (path, "/proc/self/cmdline"); - fd = open (path, O_RDONLY); + fd = open ("/proc/self/cmdline", O_RDONLY); if (fd < 0) return NULL; @@ -196,7 +191,7 @@ can_use_shortcut () return false; #endif - argv = get_cmd_line_args (0); + argv = get_cmd_line_args (); if (argv == NULL) return false; @@ -542,7 +537,6 @@ create_pause_process (const char *pause_pid_file_path, char **argv) int reexec_userns_join (int userns, int mountns, char *pause_pid_file_path) { - pid_t ppid = getpid (); char uid[16]; char gid[16]; char **argv; @@ -559,7 +553,7 @@ reexec_userns_join (int userns, int mountns, char *pause_pid_file_path) sprintf (uid, "%d", geteuid ()); sprintf (gid, "%d", getegid ()); - argv = get_cmd_line_args (ppid); + argv = get_cmd_line_args (); if (argv == NULL) { fprintf (stderr, "cannot read argv: %s\n", strerror (errno)); @@ -724,7 +718,6 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re int ret; pid_t pid; char b; - pid_t ppid = getpid (); char **argv; char uid[16]; char gid[16]; @@ -801,7 +794,7 @@ reexec_in_user_namespace (int 
ready, char *pause_pid_file_path, char *file_to_re _exit (EXIT_FAILURE); } - argv = get_cmd_line_args (ppid); + argv = get_cmd_line_args (); if (argv == NULL) { fprintf (stderr, "cannot read argv: %s\n", strerror (errno)); diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go index 94726bbbd..55427771c 100644 --- a/pkg/varlinkapi/containers.go +++ b/pkg/varlinkapi/containers.go @@ -846,11 +846,6 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO workDir = *opts.Workdir } - var detachKeys string - if opts.DetachKeys != nil { - detachKeys = *opts.DetachKeys - } - resizeChan := make(chan remotecommand.TerminalSize) reader, writer, _, pipeWriter, streams := setupStreams(call) @@ -870,8 +865,17 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO } }() + execConfig := new(libpod.ExecConfig) + execConfig.Command = opts.Cmd + execConfig.Terminal = opts.Tty + execConfig.Privileged = opts.Privileged + execConfig.Environment = envs + execConfig.User = user + execConfig.WorkDir = workDir + execConfig.DetachKeys = opts.DetachKeys + go func() { - ec, err := ctr.Exec(opts.Tty, opts.Privileged, envs, opts.Cmd, user, workDir, streams, 0, resizeChan, detachKeys) + ec, err := ctr.Exec(execConfig, streams, resizeChan) if err != nil { logrus.Errorf(err.Error()) } diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go index 50aaaaa44..e88d010c5 100644 --- a/pkg/varlinkapi/system.go +++ b/pkg/varlinkapi/system.go @@ -10,7 +10,7 @@ import ( "time" "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/libpod/cmd/podman/varlink" + iopodman "github.com/containers/libpod/cmd/podman/varlink" "github.com/containers/libpod/libpod/define" "github.com/sirupsen/logrus" ) diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go index 6d762d338..9ba0241fe 100644 --- a/test/e2e/search_test.go +++ b/test/e2e/search_test.go @@ -5,13 +5,14 @@ package integration import ( "bytes" "fmt" - . "github.com/containers/libpod/test/utils" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" "io/ioutil" "os" "strconv" "text/template" + + . "github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" ) type endpoint struct { diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 000000000..31f1a3ac0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + "sync/atomic" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. 
+ L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +// TraceLevel is the log level for tracing. Trace level is lower than debug level, +// and is usually used to trace detailed behavior of the program. +const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1)) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time is always the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// ParseLevel takes a string level and returns the Logrus log level constant. +// It supports trace level. +func ParseLevel(lvl string) (logrus.Level, error) { + if lvl == "trace" { + return TraceLevel, nil + } + return logrus.ParseLevel(lvl) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} + +// Trace logs a message at level Trace with the log entry passed-in. +func Trace(e *logrus.Entry, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debug(args...) + } +} + +// Tracef logs a message at level Trace with the log entry passed-in. +func Tracef(e *logrus.Entry, format string, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debugf(format, args...) + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 000000000..3ad22a10d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,229 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import specs "github.com/opencontainers/image-spec/specs-go/v1" + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. 
+// +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) +// For ARMv7, will also match ARMv6 and ARMv5 +// For ARMv6, will also match ARMv5 +func Only(platform specs.Platform) MatchComparer { + platform = Normalize(platform) + if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v7" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v6" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + } + + return singlePlatformComparer{ + Matcher: &matcher{ + Platform: platform, + }, + } +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. 
+var All MatchComparer = allPlatformComparer{} + +type singlePlatformComparer struct { + Matcher +} + +func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool { + return c.Match(p1) && !c.Match(p2) +} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches, and the other does, sort match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 000000000..69b336d67 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,117 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "os" + "runtime" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +// Present the ARM instruction set architecture, eg: v7, v8 +var cpuVariant string + +func init() { + if isArmArch(runtime.GOARCH) { + cpuVariant = getCPUVariant() + } else { + cpuVariant = "" + } +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. 
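// Aside, for illustration only: on a 32-bit ARMv7 host the matching
// /proc/cpuinfo entry typically reads "CPU architecture: 7", so
// getCPUInfo("Cpu architecture") returns "7" and the switch in getCPUVariant
// below maps it to "v7".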
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + switch variant { + case "8", "AArch64": + variant = "v8" + case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6TEJ": + variant = "v6" + case "5", "5T", "5TE", "5TEJ": + variant = "v5" + case "4", "4T": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 000000000..6ede94061 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These function are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. 
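The cpuinfo helpers above are unexported, but their behaviour is easy to see on a sample /proc/cpuinfo fragment. A standalone sketch that mirrors the same scan-and-map logic (the sample text and field values are illustrative):

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        // Fragment of a 32-bit ARM /proc/cpuinfo; the first matching line is enough.
        sample := "processor\t: 0\nmodel name\t: ARMv7 Processor rev 4 (v7l)\nCPU architecture: 7\n"

        scanner := bufio.NewScanner(strings.NewReader(sample))
        for scanner.Scan() {
            parts := strings.SplitN(scanner.Text(), ":", 2)
            if len(parts) == 2 && strings.EqualFold(strings.TrimSpace(parts[0]), "Cpu architecture") {
                // getCPUVariant maps "7" (and "7M", "?(12)".."?(17)") to "v7".
                fmt.Println("variant: v" + strings.TrimSpace(parts[1])) // variant: v7
                return
            }
        }
    }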
+func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 000000000..a14d80e58 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant, + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000..e8a7d5ffa --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// Default returns the default matcher for the platform. 
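The normalizeOS/normalizeArch rules are reachable through the exported Normalize helper (and, indirectly, through Parse). A small sketch of the mappings, with host-independent inputs chosen for illustration:

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/platforms"
        specs "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        p := platforms.Normalize(specs.Platform{OS: "macOS", Architecture: "x86_64"})
        fmt.Println(p.OS, p.Architecture) // darwin amd64

        p = platforms.Normalize(specs.Platform{OS: "Linux", Architecture: "aarch64", Variant: "8"})
        fmt.Println(p.OS, p.Architecture, p.Variant == "") // linux arm64 true (v8 is implied)

        p = platforms.Normalize(specs.Platform{OS: "linux", Architecture: "armhf"})
        fmt.Println(p.OS, p.Architecture, p.Variant) // linux arm v7
    }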
+func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 000000000..0defbd36c --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,31 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: "amd64", + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 000000000..d2b73ac3d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,279 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an images +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter for +// fetch and select images. +// +// More details of the specifier syntax and platform spec follow. +// +// Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. 
Typically, this will be +// images and runtimes that should make these declaring which platform they +// support specifically. This looks roughly as follows: +// +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } +// +// Most images and runtimes should at least set Architecture and OS, according +// to their GOARCH and GOOS values, respectively (follow the OCI image +// specification when in doubt). ARM should set variant under certain +// discussions, which are outlined below. +// +// Platform Specifiers +// +// While the OCI platform specifications provide a tool for components to +// specify structured information, user input typically doesn't need the full +// context and much can be inferred. To solve this problem, we introduced +// "specifiers". A specifier has the format +// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the +// operating system or the architecture or both. +// +// An example of a common specifier is `linux/amd64`. If the host has a default +// of runtime that matches this, the user can simply provide the component that +// matters. For example, if a image provides amd64 and arm64 support, the +// operating system, `linux` can be inferred, so they only have to provide +// `arm64` or `amd64`. Similar behavior is implemented for operating systems, +// where the architecture may be known but a runtime may support images from +// different operating systems. +// +// Normalization +// +// Because not all users are familiar with the way the Go runtime represents +// platforms, several normalizations have been provided to make this package +// easier to user. +// +// The following are performed for architectures: +// +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 +// +// We also normalize the operating system `macos` to `darwin`. +// +// ARM Support +// +// To qualify ARM architecture, the Variant field is used to qualify the arm +// version. The most common arm version, v7, is represented without the variant +// unless it is explicitly provided. This is treated as equivalent to armhf. A +// previous architecture, armel, will be normalized to arm/v6. +// +// While these normalizations are provided, their support on arm platforms has +// not yet been fully implemented and tested. +package platforms + +import ( + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/containerd/containerd/errdefs" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) +) + +// Matcher matches platforms specifications, provided by an image or runtime. +type Matcher interface { + Match(platform specs.Platform) bool +} + +// NewMatcher returns a simple matcher based on the provided platform +// specification. The returned matcher only looks for equality based on os, +// architecture and variant. +// +// One may implement their own matcher if this doesn't provide the required +// functionality. +// +// Applications should opt to use `Match` over directly parsing specifiers. 
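In this vendored version Parse returns a specs.Platform rather than a Matcher, so the usual pattern is to parse a specifier and wrap it with NewMatcher, or to use the Only/Ordered comparers. A minimal sketch, with the specifier chosen for illustration:

    package main

    import (
        "fmt"
        "log"

        "github.com/containerd/containerd/platforms"
    )

    func main() {
        p, err := platforms.Parse("linux/arm64")
        if err != nil {
            log.Fatal(err)
        }

        // Strict equality match against the host platform.
        m := platforms.NewMatcher(p)
        fmt.Println(m.Match(platforms.DefaultSpec()))

        // On non-Windows builds Default() wraps Only(DefaultSpec()), so on an ARM
        // host it also accepts older 32-bit variants.
        fmt.Println(platforms.Default().Match(p))
    }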
+func NewMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} + +type matcher struct { + specs.Platform +} + +func (m *matcher) Match(platform specs.Platform) bool { + normalized := Normalize(platform) + return m.OS == normalized.OS && + m.Architecture == normalized.Architecture && + m.Variant == normalized.Variant +} + +func (m *matcher) String() string { + return Format(m.Platform) +} + +// Parse parses the platform specifier syntax into a platform declaration. +// +// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`. +// The minimum required information for a platform specifier is the operating +// system or architecture. If there is only a single string (no slashes), the +// value will be matched against the known set of operating systems, then fall +// back to the known set of architectures. The missing component will be +// inferred based on the local environment. +func Parse(specifier string) (specs.Platform, error) { + if strings.Contains(specifier, "*") { + // TODO(stevvooe): need to work out exact wildcard handling + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + } + + parts := strings.Split(specifier, "/") + + for _, part := range parts { + if !specifierRe.MatchString(part) { + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + } + } + + var p specs.Platform + switch len(parts) { + case 1: + // in this case, we will test that the value might be an OS, then look + // it up. If it is not known, we'll treat it as an architecture. Since + // we have very little information about the platform here, we are + // going to be a little more strict if we don't know about the argument + // value. + p.OS = normalizeOS(parts[0]) + if isKnownOS(p.OS) { + // picks a default architecture + p.Architecture = runtime.GOARCH + if p.Architecture == "arm" { + // TODO(stevvooe): Resolve arm variant, if not v6 (default) + return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented") + } + + return p, nil + } + + p.Architecture, p.Variant = normalizeArch(parts[0], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + if isKnownArch(p.Architecture) { + p.OS = runtime.GOOS + return p, nil + } + + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + case 2: + // In this case, we treat as a regular os/arch pair. We don't care + // about whether or not we know of the platform. + p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + + return p, nil + case 3: + // we have a fully specified variant, this is rare + p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) + if p.Architecture == "arm64" && p.Variant == "" { + p.Variant = "v8" + } + + return p, nil + } + + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) +} + +// MustParse is like Parses but panics if the specifier cannot be parsed. +// Simplifies initialization of global variables. 
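How Parse treats one-, two- and three-component specifiers is easiest to see by round-tripping through Format; the specifier strings below are illustrative, and the single-component case fills in the host GOOS:

    package main

    import (
        "fmt"
        "log"

        "github.com/containerd/containerd/platforms"
    )

    func main() {
        for _, spec := range []string{"macos/x86_64", "linux/armhf", "linux/arm/v5", "arm64"} {
            p, err := platforms.Parse(spec)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%-13s -> %s\n", spec, platforms.Format(p))
        }
        // macos/x86_64  -> darwin/amd64
        // linux/armhf   -> linux/arm         (v7 is implied and omitted)
        // linux/arm/v5  -> linux/arm/v5
        // arm64         -> <host GOOS>/arm64
    }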
+func MustParse(specifier string) specs.Platform { + p, err := Parse(specifier) + if err != nil { + panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) + } + return p +} + +// Format returns a string specifier from the provided platform specification. +func Format(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) +} + +func joinNotEmpty(s ...string) string { + var ss []string + for _, s := range s { + if s == "" { + continue + } + + ss = append(ss, s) + } + + return strings.Join(ss, "/") +} + +// Normalize validates and translate the platform to the canonical value. +// +// For example, if "Aarch64" is encountered, we change it to "arm64" or if +// "x86_64" is encountered, it becomes "amd64". +func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + + // these fields are deprecated, remove them + platform.OSFeatures = nil + platform.OSVersion = "" + + return platform +} diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 3bf97a522..8d1944c65 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,22 @@ # Changelog +## v1.14.3 (2020-03-17) + Update containers/storage to v1.16.5 + Bump github.com/containers/storage from 1.16.2 to 1.16.4 + Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2 + Update github.com/openshift/imagebuilder vendoring + Update unshare man page to fix script example + Fix compilation errors on non linux platforms + Bump containers/common and opencontainers/selinux versions + Add tests for volume ownership + Preserve volume uid and gid through subsequent commands + Fix FORWARD_NULL errors found by Coverity + Bump github.com/containers/storage from 1.16.1 to 1.16.2 + Fix errors found by codespell + Bump back to v1.15.0-dev + Add Pull Request Template + ## v1.14.2 (2020-03-03) Add Buildah pull request template Bump to containers/storage v1.16.1 diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 2ece11acd..0fee906e5 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.15.0-dev" + Version = "1.14.3" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index b4c71bf6a..900accf10 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,19 @@ +- Changelog for v1.14.3 (2020-03-17) + * Update containers/storage to v1.16.5 + * Bump github.com/containers/storage from 1.16.2 to 1.16.4 + * Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2 + * Update github.com/openshift/imagebuilder vendoring + * Update unshare man page to fix script example + * Fix compilation errors on non linux platforms + * Bump containers/common and opencontainers/selinux versions + * Add tests for volume ownership + * Preserve volume uid and gid through subsequent commands + * Fix FORWARD_NULL errors found by Coverity + * Bump github.com/containers/storage from 1.16.1 to 1.16.2 + * Fix errors found by codespell + * Bump back to v1.15.0-dev + * Add Pull Request Template + - Changelog for v1.14.2 (2020-03-03) * Add Buildah pull request template * Bump to containers/storage v1.16.1 diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 97b2eeae8..862c7e1a2 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -6,7 +6,7 @@ require ( github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 github.com/containers/common v0.5.0 github.com/containers/image/v5 v5.2.1 - github.com/containers/storage v1.16.2 + github.com/containers/storage v1.16.5 github.com/cyphar/filepath-securejoin v0.2.2 github.com/docker/distribution v2.7.1+incompatible github.com/docker/go-metrics v0.0.1 // indirect @@ -27,7 +27,7 @@ require ( github.com/opencontainers/runtime-tools v0.9.0 github.com/opencontainers/selinux v1.4.0 github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 - github.com/openshift/imagebuilder v1.1.1 + github.com/openshift/imagebuilder v1.1.2 github.com/pkg/errors v0.9.1 github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f github.com/seccomp/libseccomp-golang v0.9.1 diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 191eb1f11..17ea81042 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -159,6 +159,12 @@ github.com/containers/storage v1.16.1 h1:gVLVqbqaoyopLJbcQ9PQdsnm8SzVy6Vw24fofwM github.com/containers/storage v1.16.1/go.mod h1:toFp72SLn/iyJ6YbrnrZ0bW63aH2Qw3dA8JVwL4ADPo= github.com/containers/storage v1.16.2 h1:S77Y+lmJcnGoPEZB2OOrTrRGyjT8viDCGyhVNNz78h8= github.com/containers/storage v1.16.2/go.mod h1:/RNmsK01ajCL+VtMSi3W8kHzpBwN+Q5gLYWgfw5wlMg= +github.com/containers/storage v1.16.3 h1:bctiz1I+0TIivtXbrVmy02ZYlOA+IjKIJMzAMTBifj8= +github.com/containers/storage v1.16.3/go.mod h1:dNTv0+BaebIAOGgH34dPtwGPR+Km2fObcfOlFxYFwA0= +github.com/containers/storage v1.16.4 h1:+pEL9A1i11qy1j/MYvh8Y5vs79BBfA+hslyJq1iPOGc= +github.com/containers/storage v1.16.4/go.mod h1:SdysZeLKJOvfHYysUWg9OZUC3gdZWi5b2b7NC18VpPE= +github.com/containers/storage v1.16.5 h1:eHeWEhUEWX3VMIG1Vn1rEjfRoLHUQev3cwtA5zd89wk= +github.com/containers/storage v1.16.5/go.mod h1:SdysZeLKJOvfHYysUWg9OZUC3gdZWi5b2b7NC18VpPE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -380,10 +386,14 @@ github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/If github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.2 h1:8d4I0LDiieuGngsqlqOih9ker/NS0LX4V0i+EhiFWg0= +github.com/klauspost/pgzip v1.2.2/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -516,6 +526,8 @@ github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYM github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I= github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.1.2 h1:vCO8hZQR/4uzo+j0PceBH5aKFcvCDM43UzUGOYQN+Go= +github.com/openshift/imagebuilder v1.1.2/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index 5a2d0d539..81d7b8421 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -288,8 +288,12 @@ func NewBuilder(args map[string]string) *Builder { for k, v := range builtinAllowedBuildArgs { allowed[k] = v } + provided := make(map[string]string) + for k, v := range args { + provided[k] = v + } return &Builder{ - Args: args, + Args: provided, AllowedArgs: allowed, } } diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index ff365848a..e7f2f97bf 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -19,6 +19,7 @@ import ( docker "github.com/fsouza/go-dockerclient" + "github.com/containerd/containerd/platforms" "github.com/openshift/imagebuilder/signal" "github.com/openshift/imagebuilder/strslice" ) @@ -27,6 +28,27 @@ var ( obRgex = 
regexp.MustCompile(`(?i)^\s*ONBUILD\s*`) ) +var localspec = platforms.DefaultSpec() + +// https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope +var builtinBuildArgs = map[string]string{ + "TARGETPLATFORM": localspec.OS + "/" + localspec.Architecture, + "TARGETOS": localspec.OS, + "TARGETARCH": localspec.Architecture, + "TARGETVARIANT": localspec.Variant, + "BUILDPLATFORM": localspec.OS + "/" + localspec.Architecture, + "BUILDOS": localspec.OS, + "BUILDARCH": localspec.Architecture, + "BUILDVARIANT": localspec.Variant, +} + +func init() { + if localspec.Variant != "" { + builtinBuildArgs["TARGETPLATFORM"] = builtinBuildArgs["TARGETPLATFORM"] + "/" + localspec.Variant + builtinBuildArgs["BUILDPLATFORM"] = builtinBuildArgs["BUILDPLATFORM"] + "/" + localspec.Variant + } +} + // ENV foo bar // // Sets the environment variable foo to bar, also makes interpolation @@ -131,14 +153,16 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin var chown string last := len(args) - 1 dest := makeAbsolute(args[last], b.RunConfig.WorkingDir) - if len(flagArgs) > 0 { - for _, arg := range flagArgs { - switch { - case strings.HasPrefix(arg, "--chown="): - chown = strings.TrimPrefix(arg, "--chown=") - default: - return fmt.Errorf("ADD only supports the --chown=<uid:gid> flag") - } + for _, a := range flagArgs { + arg, err := ProcessWord(a, b.Env) + if err != nil { + return err + } + switch { + case strings.HasPrefix(arg, "--chown="): + chown = strings.TrimPrefix(arg, "--chown=") + default: + return fmt.Errorf("ADD only supports the --chown=<uid:gid> flag") } } b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true, Chown: chown}) @@ -157,16 +181,18 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg dest := makeAbsolute(args[last], b.RunConfig.WorkingDir) var chown string var from string - if len(flagArgs) > 0 { - for _, arg := range flagArgs { - switch { - case strings.HasPrefix(arg, "--chown="): - chown = strings.TrimPrefix(arg, "--chown=") - case strings.HasPrefix(arg, "--from="): - from = strings.TrimPrefix(arg, "--from=") - default: - return fmt.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags") - } + for _, a := range flagArgs { + arg, err := ProcessWord(a, b.Env) + if err != nil { + return err + } + switch { + case strings.HasPrefix(arg, "--chown="): + chown = strings.TrimPrefix(arg, "--chown=") + case strings.HasPrefix(arg, "--from="): + from = strings.TrimPrefix(arg, "--from=") + default: + return fmt.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags") } } b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown}) @@ -516,6 +542,8 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs return nil } +var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"} + // ARG name[=value] // // Adds the variable foo to the trusted list of variables that can be passed @@ -543,6 +571,26 @@ func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []strin name = parts[0] value = parts[1] hasDefault = true + if name == "TARGETPLATFORM" { + p, err := platforms.Parse(value) + if err != nil { + return fmt.Errorf("error parsing TARGETPLATFORM argument") + } + for _, val := range targetArgs { + b.AllowedArgs[val] = true + } + b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture + b.Args["TARGETOS"] 
= p.OS + b.Args["TARGETARCH"] = p.Architecture + b.Args["TARGETVARIANT"] = p.Variant + if p.Variant != "" { + b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + } + } + } else if val, ok := builtinBuildArgs[arg]; ok { + name = arg + value = val + hasDefault = true } else { name = arg hasDefault = false diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf index c3f7d1a6b..8074ce80a 100644 --- a/vendor/github.com/openshift/imagebuilder/vendor.conf +++ b/vendor/github.com/openshift/imagebuilder/vendor.conf @@ -1,4 +1,5 @@ github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/containerd/containerd v1.3.0 github.com/containers/storage v1.2 github.com/docker/docker b68221c37ee597950364788204546f9c9d0e46a1 github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e @@ -18,3 +19,7 @@ golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba k8s.io/klog 8e90cee79f823779174776412c13478955131846 +google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 +github.com/golang/protobuf v1.2.0 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 + diff --git a/vendor/modules.txt b/vendor/modules.txt index 2b93c89f2..1ad73e1bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -44,6 +44,8 @@ github.com/checkpoint-restore/go-criu/rpc github.com/containerd/cgroups/stats/v1 # github.com/containerd/containerd v1.3.0 github.com/containerd/containerd/errdefs +github.com/containerd/containerd/log +github.com/containerd/containerd/platforms # github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc github.com/containerd/continuity/fs github.com/containerd/continuity/syscallx @@ -62,7 +64,7 @@ github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/plugins/ipam/host-local/backend github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator -# github.com/containers/buildah v1.14.3-0.20200313154200-d26f437b2a46 +# github.com/containers/buildah v1.14.3 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -413,7 +415,7 @@ github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk # github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 github.com/openshift/api/config/v1 -# github.com/openshift/imagebuilder v1.1.1 +# github.com/openshift/imagebuilder v1.1.2 github.com/openshift/imagebuilder github.com/openshift/imagebuilder/dockerfile/command github.com/openshift/imagebuilder/dockerfile/parser diff --git a/version/version.go b/version/version.go index 5a7b4a36e..0d0b0b422 100644 --- a/version/version.go +++ b/version/version.go @@ -4,7 +4,7 @@ package version // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -const Version = "1.8.2-dev" +const Version = "1.8.3-dev" // RemoteAPIVersion is the version for the remote // client API. It is used to determine compatibility |
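The dispatchers.go change above seeds the Docker-style automatic platform build args from the local platform. A rough sketch of the same derivation using the vendored platforms package (the printed values depend on the host the build runs on):

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/platforms"
    )

    func main() {
        // Mirrors how imagebuilder derives TARGET*/BUILD* defaults from DefaultSpec().
        spec := platforms.DefaultSpec()

        plat := spec.OS + "/" + spec.Architecture
        if spec.Variant != "" {
            plat += "/" + spec.Variant
        }

        fmt.Println("TARGETPLATFORM =", plat)
        fmt.Println("TARGETOS       =", spec.OS)
        fmt.Println("TARGETARCH     =", spec.Architecture)
        fmt.Println("TARGETVARIANT  =", spec.Variant)
    }

With the arg() handling shown above, a Containerfile that declares `ARG TARGETPLATFORM=linux/arm64` would also populate TARGETOS, TARGETARCH and TARGETVARIANT, while a bare `ARG TARGETARCH` picks up the builtin default for the build host.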