Diffstat (limited to 'pkg/domain/infra')
-rw-r--r--  pkg/domain/infra/abi/containers.go | 327
-rw-r--r--  pkg/domain/infra/abi/images.go | 166
-rw-r--r--  pkg/domain/infra/abi/images_list.go | 80
-rw-r--r--  pkg/domain/infra/abi/images_test.go | 37
-rw-r--r--  pkg/domain/infra/abi/parse/parse.go | 68
-rw-r--r--  pkg/domain/infra/abi/pods.go | 252
-rw-r--r--  pkg/domain/infra/abi/runtime.go | 17
-rw-r--r--  pkg/domain/infra/abi/volumes.go | 159
-rw-r--r--  pkg/domain/infra/runtime_abi.go | 38
-rw-r--r--  pkg/domain/infra/runtime_image_proxy.go | 21
-rw-r--r--  pkg/domain/infra/runtime_libpod.go | 328
-rw-r--r--  pkg/domain/infra/runtime_proxy.go | 21
-rw-r--r--  pkg/domain/infra/runtime_tunnel.go | 35
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 212
-rw-r--r--  pkg/domain/infra/tunnel/helpers.go | 76
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 87
-rw-r--r--  pkg/domain/infra/tunnel/pods.go | 179
-rw-r--r--  pkg/domain/infra/tunnel/runtime.go | 15
-rw-r--r--  pkg/domain/infra/tunnel/volumes.go | 70
19 files changed, 2188 insertions, 0 deletions
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
new file mode 100644
index 000000000..d25af24c5
--- /dev/null
+++ b/pkg/domain/infra/abi/containers.go
@@ -0,0 +1,327 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+ "io/ioutil"
+ "strings"
+
+ "github.com/containers/buildah"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// TODO: Should return *entities.ContainerExistsReport, error
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupContainer(nameOrId)
+ if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
+ return nil, err
+ }
+ return &entities.BoolReport{Value: err == nil}, nil
+}
+
+func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
+ var (
+ responses []entities.WaitReport
+ )
+ ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ response := entities.WaitReport{Id: c.ID()}
+ exitCode, err := c.WaitForConditionWithInterval(options.Interval, options.Condition)
+ if err != nil {
+ response.Error = err
+ } else {
+ response.ExitCode = exitCode
+ }
+ responses = append(responses, response)
+ }
+ return responses, nil
+}
+
+func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ ctrs []*libpod.Container
+ err error
+ report []*entities.PauseUnpauseReport
+ )
+ if options.All {
+ ctrs, err = ic.Libpod.GetAllContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := c.Pause()
+ report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
+ }
+ return report, nil
+}
+
+func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ ctrs []*libpod.Container
+ err error
+ report []*entities.PauseUnpauseReport
+ )
+ if options.All {
+ ctrs, err = ic.Libpod.GetAllContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := c.Unpause()
+ report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
+ }
+ return report, nil
+}
+func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
+ var (
+ reports []*entities.StopReport
+ )
+ names := namesOrIds
+ for _, cidFile := range options.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ report := entities.StopReport{Id: con.ID()}
+ err = con.StopWithTimeout(options.Timeout)
+ if err != nil {
+ // These first two are considered non-fatal under the right conditions
+ if errors.Cause(err) == define.ErrCtrStopped {
+ logrus.Debugf("Container %s is already stopped", con.ID())
+ reports = append(reports, &report)
+ continue
+
+ } else if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ logrus.Debugf("Container %s is not running, could not stop", con.ID())
+ reports = append(reports, &report)
+ continue
+ }
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
+ var (
+ reports []*entities.KillReport
+ )
+ sig, err := signal.ParseSignalNameOrNumber(options.Signal)
+ if err != nil {
+ return nil, err
+ }
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ reports = append(reports, &entities.KillReport{
+ Id: con.ID(),
+ Err: con.Kill(uint(sig)),
+ })
+ }
+ return reports, nil
+}
+func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
+ var (
+ reports []*entities.RestartReport
+ )
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ timeout := con.StopTimeout()
+ if options.Timeout != nil {
+ timeout = *options.Timeout
+ }
+ reports = append(reports, &entities.RestartReport{
+ Id: con.ID(),
+ Err: con.RestartWithTimeout(ctx, timeout),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
+ var (
+ reports []*entities.RmReport
+ )
+ if options.Storage {
+ for _, ctr := range namesOrIds {
+ report := entities.RmReport{Id: ctr}
+ if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
+ report.Err = err
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+ }
+
+ names := namesOrIds
+ for _, cidFile := range options.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ // Failed to get containers. If force is specified, get the containers ID
+ // and evict them
+ if !options.Force {
+ return nil, err
+ }
+
+ for _, ctr := range namesOrIds {
+ logrus.Debugf("Evicting container %q", ctr)
+ report := entities.RmReport{Id: ctr}
+ id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
+ if err != nil {
+ if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+ reports = append(reports, &report)
+ continue
+ }
+ report.Err = errors.Wrapf(err, "Failed to evict container: %q", id)
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+ }
+
+ for _, c := range ctrs {
+ report := entities.RmReport{Id: c.ID()}
+ err := ic.Libpod.RemoveContainer(ctx, c, options.Force, options.Volumes)
+ if err != nil {
+ if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+ reports = append(reports, &report)
+ continue
+ }
+ logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.ContainerInspectOptions) ([]*entities.ContainerInspectReport, error) {
+ var reports []*entities.ContainerInspectReport
+ ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ data, err := c.Inspect(options.Size)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
+ var (
+ container *libpod.Container
+ err error
+ )
+
+ // Look up the container.
+ if options.Latest {
+ container, err = ic.Libpod.GetLatestContainer()
+ } else {
+ container, err = ic.Libpod.LookupContainer(options.NameOrID)
+ }
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to look up requested container")
+ }
+
+ // Run Top.
+ report := &entities.StringSliceReport{}
+ report.Value, err = container.Top(options.Descriptors)
+ return report, err
+}
+
+func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string, options entities.CommitOptions) (*entities.CommitReport, error) {
+ var (
+ mimeType string
+ )
+ ctr, err := ic.Libpod.LookupContainer(nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ rtc, err := ic.Libpod.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ switch options.Format {
+ case "oci":
+ mimeType = buildah.OCIv1ImageManifest
+ if len(options.Message) > 0 {
+ return nil, errors.Errorf("messages are only compatible with the docker image format (-f docker)")
+ }
+ case "docker":
+ mimeType = manifest.DockerV2Schema2MediaType
+ default:
+ return nil, errors.Errorf("unrecognized image format %q", options.Format)
+ }
+ sc := image.GetSystemContext(rtc.Engine.SignaturePolicyPath, "", false)
+ coptions := buildah.CommitOptions{
+ SignaturePolicyPath: rtc.Engine.SignaturePolicyPath,
+ ReportWriter: options.Writer,
+ SystemContext: sc,
+ PreferredManifestType: mimeType,
+ }
+ opts := libpod.ContainerCommitOptions{
+ CommitOptions: coptions,
+ Pause: options.Pause,
+ IncludeVolumes: options.IncludeVolumes,
+ Message: options.Message,
+ Changes: options.Changes,
+ Author: options.Author,
+ }
+ newImage, err := ctr.Commit(ctx, options.ImageName, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.CommitReport{Id: newImage.ID()}, nil
+}
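A minimal caller sketch for the stop path above, assuming the entities and logrus imports already used in this file and an engine obtained from the NewLibpodRuntime constructor added later in this diff; the container names and timeout are illustrative:

func stopContainers(ctx context.Context, engine entities.ContainerEngine, names []string) error {
	// Ask the engine to stop each container with a 10 second grace period.
	reports, err := engine.ContainerStop(ctx, names, entities.StopOptions{Timeout: 10})
	if err != nil {
		return err
	}
	// Per-container failures are carried in the StopReport entries, not in err.
	for _, r := range reports {
		if r.Err != nil {
			logrus.Errorf("could not stop container %s: %v", r.Id, r.Err)
		}
	}
	return nil
}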
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
new file mode 100644
index 000000000..44420c1e1
--- /dev/null
+++ b/pkg/domain/infra/abi/images.go
@@ -0,0 +1,166 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+ "fmt"
+
+ libpodImage "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+)
+
+func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
+ if _, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId); err != nil {
+ return &entities.BoolReport{}, nil
+ }
+ return &entities.BoolReport{Value: true}, nil
+}
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId []string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+ report := entities.ImageDeleteReport{}
+
+ if opts.All {
+ var previousTargets []*libpodImage.Image
+ repeatRun:
+ targets, err := ir.Libpod.ImageRuntime().GetRWImages()
+ if err != nil {
+ return &report, errors.Wrapf(err, "unable to query local images")
+ }
+
+ if len(targets) > 0 && len(targets) == len(previousTargets) {
+ return &report, errors.New("unable to delete all images; re-run the rmi command")
+ }
+ previousTargets = targets
+
+ for _, img := range targets {
+ isParent, err := img.IsParent(ctx)
+ if err != nil {
+ return &report, err
+ }
+ if isParent {
+ continue
+ }
+ err = ir.deleteImage(ctx, img, opts, &report)
+ report.Errors = append(report.Errors, err)
+ }
+ if len(previousTargets) != 1 {
+ goto repeatRun
+ }
+ return &report, nil
+ }
+
+ for _, id := range nameOrId {
+ image, err := ir.Libpod.ImageRuntime().NewFromLocal(id)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ir.deleteImage(ctx, image, opts, &report)
+ if err != nil {
+ return &report, err
+ }
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) deleteImage(ctx context.Context, img *libpodImage.Image, opts entities.ImageDeleteOptions, report *entities.ImageDeleteReport) error {
+ results, err := ir.Libpod.RemoveImage(ctx, img, opts.Force)
+ switch errors.Cause(err) {
+ case nil:
+ break
+ case storage.ErrImageUsedByContainer:
+ report.ImageInUse = errors.New(
+ fmt.Sprintf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID()))
+ return nil
+ case libpodImage.ErrNoSuchImage:
+ report.ImageNotFound = err
+ return nil
+ default:
+ return err
+ }
+
+ report.Deleted = append(report.Deleted, results.Deleted)
+ report.Untagged = append(report.Untagged, results.Untagged...)
+ return nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+ results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, opts.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ report := entities.ImagePruneReport{
+ Report: entities.Report{
+ Id: results,
+ Err: nil,
+ },
+ Size: 0,
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ results, err := image.History(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ history := entities.ImageHistoryReport{
+ Layers: make([]entities.ImageHistoryLayer, len(results)),
+ }
+
+ for i, layer := range results {
+ history.Layers[i] = ToDomainHistoryLayer(layer)
+ }
+ return &history, nil
+}
+
+func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer {
+ l := entities.ImageHistoryLayer{}
+ l.ID = layer.ID
+ l.Created = layer.Created.Unix()
+ l.CreatedBy = layer.CreatedBy
+ l.Tags = layer.Tags
+ l.Size = layer.Size
+ l.Comment = layer.Comment
+ return l
+}
+
+// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
+// if err != nil {
+// return nil, err
+// }
+//
+// results, err := r.libpod.RemoveImage(ctx, image, opts.Force)
+// if err != nil {
+// return nil, err
+// }
+//
+// report := entities.ImageDeleteReport{}
+// if err := utils.DeepCopy(&report, results); err != nil {
+// return nil, err
+// }
+// return &report, nil
+// }
+//
+// func (r *imageRuntime) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+// // TODO: map FilterOptions
+// id, err := r.libpod.ImageEngine().PruneImages(ctx, opts.All, []string{})
+// if err != nil {
+// return nil, err
+// }
+//
+// // TODO: Determine Size
+// report := entities.ImagePruneReport{}
+// copy(report.Report.Id, id)
+// return &report, nil
+// }
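A caller-side sketch of the delete path above (image names are hypothetical; with opts.All set, the name list is ignored and the goto loop walks all read-write images instead):

func removeImages(ctx context.Context, ir entities.ImageEngine) error {
	report, err := ir.Delete(ctx, []string{"alpine", "busybox"}, entities.ImageDeleteOptions{Force: false})
	if err != nil {
		return err
	}
	// Untagged lists references that were only untagged; Deleted lists the
	// image IDs that were actually removed from storage.
	for _, u := range report.Untagged {
		fmt.Println("untagged:", u)
	}
	for _, d := range report.Deleted {
		fmt.Println("deleted:", d)
	}
	return nil
}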
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
new file mode 100644
index 000000000..2f4020374
--- /dev/null
+++ b/pkg/domain/infra/abi/images_list.go
@@ -0,0 +1,80 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ libpodImage "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
+ var (
+ images []*libpodImage.Image
+ err error
+ )
+
+ // TODO: Future work support for domain.Filters
+ // filters := utils.ToLibpodFilters(opts.Filters)
+
+ if len(opts.Filter) > 0 {
+ images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
+ } else {
+ images, err = ir.Libpod.ImageRuntime().GetImages()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ summaries := make([]*entities.ImageSummary, len(images))
+ for i, img := range images {
+ var repoTags []string
+ if opts.All {
+ pairs, err := libpodImage.ReposToMap(img.Names())
+ if err != nil {
+ return nil, err
+ }
+
+ for repo, tags := range pairs {
+ for _, tag := range tags {
+ repoTags = append(repoTags, repo+":"+tag)
+ }
+ }
+ } else {
+ repoTags, _ = img.RepoTags()
+ }
+
+ digests := make([]string, len(img.Digests()))
+ for j, d := range img.Digests() {
+ digests[j] = string(d)
+ }
+
+ e := entities.ImageSummary{
+ ID: img.ID(),
+
+ ConfigDigest: string(img.ConfigDigest),
+ Created: img.Created().Unix(),
+ Dangling: img.Dangling(),
+ Digest: string(img.Digest()),
+ Digests: digests,
+ History: img.NamesHistory(),
+ Names: img.Names(),
+ ParentId: img.Parent,
+ ReadOnly: img.IsReadOnly(),
+ SharedSize: 0,
+ VirtualSize: img.VirtualSize,
+ RepoTags: repoTags,
+ }
+ e.Labels, _ = img.Labels(context.TODO())
+
+ ctnrs, _ := img.Containers()
+ e.Containers = len(ctnrs)
+
+ sz, _ := img.Size(context.TODO())
+ e.Size = int64(*sz)
+
+ summaries[i] = &e
+ }
+ return summaries, nil
+}
diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go
new file mode 100644
index 000000000..20ef1b150
--- /dev/null
+++ b/pkg/domain/infra/abi/images_test.go
@@ -0,0 +1,37 @@
+package abi
+
+//
+// import (
+// "context"
+// "testing"
+//
+// "github.com/stretchr/testify/mock"
+// )
+//
+// type MockImageRuntime struct {
+// mock.Mock
+// }
+//
+// func (m *MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error {
+// _ = m.Called(ctx, renderer, name)
+// return nil
+// }
+//
+// func TestImageSuccess(t *testing.T) {
+// actual := func() interface{} { return nil }
+//
+// m := new(MockImageRuntime)
+// m.On(
+// "Delete",
+// mock.AnythingOfType("*context.emptyCtx"),
+// mock.AnythingOfType("func() interface {}"),
+// "fedora").
+// Return(nil)
+//
+// r := DirectImageRuntime{m}
+// err := r.Delete(context.TODO(), actual, "fedora")
+// if err != nil {
+// t.Errorf("error should be nil, got: %v", err)
+// }
+// m.AssertExpectations(t)
+// }
diff --git a/pkg/domain/infra/abi/parse/parse.go b/pkg/domain/infra/abi/parse/parse.go
new file mode 100644
index 000000000..6c0e1ee55
--- /dev/null
+++ b/pkg/domain/infra/abi/parse/parse.go
@@ -0,0 +1,68 @@
+package parse
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Handle volume options from CLI.
+// Parse "o" option to find UID, GID.
+func ParseVolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error) {
+ libpodOptions := []libpod.VolumeCreateOption{}
+ volumeOptions := make(map[string]string)
+
+ for key, value := range opts {
+ switch key {
+ case "o":
+ // o has special handling to parse out UID, GID.
+ // These are separate Libpod options.
+ splitVal := strings.Split(value, ",")
+ finalVal := []string{}
+ for _, o := range splitVal {
+ // Options will be formatted as either "opt" or
+ // "opt=value"
+ splitO := strings.SplitN(o, "=", 2)
+ switch strings.ToLower(splitO[0]) {
+ case "uid":
+ if len(splitO) != 2 {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "uid option must provide a UID")
+ }
+ intUID, err := strconv.Atoi(splitO[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot convert UID %s to integer", splitO[1])
+ }
+ logrus.Debugf("Removing uid= from options and adding WithVolumeUID for UID %d", intUID)
+ libpodOptions = append(libpodOptions, libpod.WithVolumeUID(intUID))
+ case "gid":
+ if len(splitO) != 2 {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "gid option must provide a GID")
+ }
+ intGID, err := strconv.Atoi(splitO[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot convert GID %s to integer", splitO[1])
+ }
+ logrus.Debugf("Removing gid= from options and adding WithVolumeGID for GID %d", intGID)
+ libpodOptions = append(libpodOptions, libpod.WithVolumeGID(intGID))
+ default:
+ finalVal = append(finalVal, o)
+ }
+ }
+ if len(finalVal) > 0 {
+ volumeOptions[key] = strings.Join(finalVal, ",")
+ }
+ default:
+ volumeOptions[key] = value
+ }
+ }
+
+ if len(volumeOptions) > 0 {
+ libpodOptions = append(libpodOptions, libpod.WithVolumeOptions(volumeOptions))
+ }
+
+ return libpodOptions, nil
+}
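A worked example of the parsing above, assuming a caller outside the parse package and an option map of the kind a volume-create call might hand in (the exact map is illustrative): uid and gid are peeled out of the "o" value and everything else is passed through untouched.

func exampleVolumeOptions() ([]libpod.VolumeCreateOption, error) {
	// Yields three options: WithVolumeUID(1000), WithVolumeGID(1000), and a
	// WithVolumeOptions carrying {"type": "tmpfs", "o": "nodev"} once the
	// uid=/gid= entries have been stripped from "o".
	return parse.ParseVolumeOptions(map[string]string{
		"type": "tmpfs",
		"o":    "uid=1000,gid=1000,nodev",
	})
}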
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
new file mode 100644
index 000000000..619e973cf
--- /dev/null
+++ b/pkg/domain/infra/abi/pods.go
@@ -0,0 +1,252 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// getPodsByContext returns a slice of pods. Note that all, latest and pods are
+// mutually exclusive arguments.
+func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
+ var outpods []*libpod.Pod
+ if all {
+ return runtime.GetAllPods()
+ }
+ if latest {
+ p, err := runtime.GetLatestPod()
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, p)
+ return outpods, nil
+ }
+ var err error
+ for _, p := range pods {
+ pod, e := runtime.LookupPod(p)
+ if e != nil {
+ // Log all errors here, so callers don't need to.
+ logrus.Debugf("Error looking up pod %q: %v", p, e)
+ if err == nil {
+ err = e
+ }
+ } else {
+ outpods = append(outpods, pod)
+ }
+ }
+ return outpods, err
+}
+
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupPod(nameOrId)
+ if err != nil && errors.Cause(err) != define.ErrNoSuchPod {
+ return nil, err
+ }
+ return &entities.BoolReport{Value: err == nil}, nil
+}
+
+func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
+ var (
+ reports []*entities.PodKillReport
+ )
+ sig, err := signal.ParseSignalNameOrNumber(options.Signal)
+ if err != nil {
+ return nil, err
+ }
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, p := range pods {
+ report := entities.PodKillReport{Id: p.ID()}
+ conErrs, err := p.Kill(uint(sig))
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(conErrs) > 0 {
+ for _, err := range conErrs {
+ report.Errs = append(report.Errs, err)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
+ var (
+ reports []*entities.PodPauseReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodPauseReport{Id: p.ID()}
+ errs, err := p.Pause()
+ if err != nil {
+ report.Errs = []error{err}
+ }
+ // Fall through so the report (with its error) is still collected below.
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
+ var (
+ reports []*entities.PodUnpauseReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodUnpauseReport{Id: p.ID()}
+ errs, err := p.Unpause()
+ if err != nil {
+ report.Errs = []error{err}
+ }
+ // Fall through so the report (with its error) is still collected below.
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
+ var (
+ reports []*entities.PodStopReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodStopReport{Id: p.ID()}
+ errs, err := p.StopWithTimeout(ctx, false, options.Timeout)
+ if err != nil {
+ report.Errs = []error{err}
+ }
+ // Fall through so the report (with its error) is still collected below.
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
+ var (
+ reports []*entities.PodRestartReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodRestartReport{Id: p.ID()}
+ errs, err := p.Restart(ctx)
+ if err != nil {
+ report.Errs = []error{err}
+ }
+ // Fall through so the report (with its error) is still collected below.
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
+ var (
+ reports []*entities.PodStartReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodStartReport{Id: p.ID()}
+ errs, err := p.Start(ctx)
+ if err != nil {
+ report.Errs = []error{err}
+ }
+ // Fall through so the report (with its error) is still collected below.
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
+ var (
+ reports []*entities.PodRmReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodRmReport{Id: p.ID()}
+ err := ic.Libpod.RemovePod(ctx, p, true, options.Force)
+ if err != nil {
+ report.Err = err
+ }
+ // Record the report even when removal failed.
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodCreate(ctx context.Context, opts entities.PodCreateOptions) (*entities.PodCreateReport, error) {
+ podSpec := specgen.NewPodSpecGenerator()
+ opts.ToPodSpecGen(podSpec)
+ pod, err := podSpec.MakePod(ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.PodCreateReport{Id: pod.ID()}, nil
+}
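A short caller sketch for the kill path above (the pod name and signal are illustrative; All, Latest and explicit names remain mutually exclusive, as noted on getPodsByContext):

func killPod(ctx context.Context, ic entities.ContainerEngine, name string) error {
	reports, err := ic.PodKill(ctx, []string{name}, entities.PodKillOptions{Signal: "SIGKILL"})
	if err != nil {
		return err
	}
	// Each report carries the per-container errors returned by p.Kill().
	for _, r := range reports {
		for _, e := range r.Errs {
			logrus.Errorf("pod %s: %v", r.Id, e)
		}
	}
	return nil
}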
diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go
new file mode 100644
index 000000000..b53fb6d3a
--- /dev/null
+++ b/pkg/domain/infra/abi/runtime.go
@@ -0,0 +1,17 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "github.com/containers/libpod/libpod"
+)
+
+// Image-related runtime linked against libpod library
+type ImageEngine struct {
+ Libpod *libpod.Runtime
+}
+
+// Container-related runtime linked against libpod library
+type ContainerEngine struct {
+ Libpod *libpod.Runtime
+}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
new file mode 100644
index 000000000..bdae4359d
--- /dev/null
+++ b/pkg/domain/infra/abi/volumes.go
@@ -0,0 +1,159 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/filters"
+ "github.com/containers/libpod/pkg/domain/infra/abi/parse"
+ "github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+ var (
+ volumeOptions []libpod.VolumeCreateOption
+ )
+ if len(opts.Name) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeName(opts.Name))
+ }
+ if len(opts.Driver) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeDriver(opts.Driver))
+ }
+ if len(opts.Label) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(opts.Label))
+ }
+ if len(opts.Options) > 0 {
+ parsedOptions, err := parse.ParseVolumeOptions(opts.Options)
+ if err != nil {
+ return nil, err
+ }
+ volumeOptions = append(volumeOptions, parsedOptions...)
+ }
+ vol, err := ic.Libpod.NewVolume(ctx, volumeOptions...)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.IdOrNameResponse{IdOrName: vol.Name()}, nil
+}
+
+func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
+ var (
+ err error
+ reports []*entities.VolumeRmReport
+ vols []*libpod.Volume
+ )
+ if opts.All {
+ vols, err = ic.Libpod.Volumes()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ for _, id := range namesOrIds {
+ vol, err := ic.Libpod.LookupVolume(id)
+ if err != nil {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: err,
+ Id: id,
+ })
+ continue
+ }
+ vols = append(vols, vol)
+ }
+ }
+ for _, vol := range vols {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: ic.Libpod.RemoveVolume(ctx, vol, opts.Force),
+ Id: vol.Name(),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
+ var (
+ err error
+ reports []*entities.VolumeInspectReport
+ vols []*libpod.Volume
+ )
+
+ // Note: as with the previous implementation, a single failure here
+ // results in an immediate return.
+ if opts.All {
+ vols, err = ic.Libpod.GetAllVolumes()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ for _, v := range namesOrIds {
+ vol, err := ic.Libpod.LookupVolume(v)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error inspecting volume %s", v)
+ }
+ vols = append(vols, vol)
+ }
+ }
+ for _, v := range vols {
+ config := entities.VolumeConfigResponse{
+ Name: v.Name(),
+ Driver: v.Driver(),
+ Mountpoint: v.MountPoint(),
+ CreatedAt: v.CreatedTime(),
+ Labels: v.Labels(),
+ Scope: v.Scope(),
+ Options: v.Options(),
+ UID: v.UID(),
+ GID: v.GID(),
+ }
+ reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) {
+ var (
+ reports []*entities.VolumePruneReport
+ )
+ pruned, err := ic.Libpod.PruneVolumes(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range pruned {
+ reports = append(reports, &entities.VolumePruneReport{
+ Err: v,
+ Id: k,
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) {
+ var (
+ reports []*entities.VolumeListReport
+ )
+ volumeFilters, err := filters.GenerateVolumeFilters(opts.Filter)
+ if err != nil {
+ return nil, err
+ }
+ vols, err := ic.Libpod.Volumes(volumeFilters...)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ config := entities.VolumeConfigResponse{
+ Name: v.Name(),
+ Driver: v.Driver(),
+ Mountpoint: v.MountPoint(),
+ CreatedAt: v.CreatedTime(),
+ Labels: v.Labels(),
+ Scope: v.Scope(),
+ Options: v.Options(),
+ UID: v.UID(),
+ GID: v.GID(),
+ }
+ reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
+ }
+ return reports, nil
+}
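A minimal sketch of the create path above (name, label and options are illustrative; the "o" value is routed through parse.ParseVolumeOptions shown earlier in this diff):

func createVolume(ctx context.Context, ic entities.ContainerEngine) (string, error) {
	resp, err := ic.VolumeCreate(ctx, entities.VolumeCreateOptions{
		Name:    "dbdata",
		Label:   map[string]string{"app": "db"},
		Options: map[string]string{"o": "uid=1000"},
	})
	if err != nil {
		return "", err
	}
	// IdOrName holds the volume name reported back by libpod.
	return resp.IdOrName, nil
}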
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
new file mode 100644
index 000000000..f11026571
--- /dev/null
+++ b/pkg/domain/infra/runtime_abi.go
@@ -0,0 +1,38 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+// NewContainerEngine factory provides a libpod runtime for container-related operations
+func NewContainerEngine(facts entities.EngineOptions) (entities.ContainerEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ r, err := NewLibpodRuntime(facts.FlagSet, facts)
+ return r, err
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(facts entities.EngineOptions) (entities.ImageEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ r, err := NewLibpodImageRuntime(facts.FlagSet, facts)
+ return r, err
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
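Both factories above key off EngineOptions.EngineMode; a rough sketch of a caller that builds the two engines from one pre-populated options value (facts is assumed to have been filled in by the CLI layer):

func newEngines(facts entities.EngineOptions) (entities.ContainerEngine, entities.ImageEngine, error) {
	// With ABISupport compiled in, ABIMode builds an in-process libpod runtime;
	// TunnelMode dials the API service described by the options instead.
	containerEngine, err := infra.NewContainerEngine(facts)
	if err != nil {
		return nil, nil, err
	}
	imageEngine, err := infra.NewImageEngine(facts)
	if err != nil {
		return nil, nil, err
	}
	return containerEngine, imageEngine, nil
}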
diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go
new file mode 100644
index 000000000..befc66b9a
--- /dev/null
+++ b/pkg/domain/infra/runtime_image_proxy.go
@@ -0,0 +1,21 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
+ "github.com/spf13/pflag"
+)
+
+// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
+func NewLibpodImageRuntime(flags *pflag.FlagSet, opts entities.EngineOptions) (entities.ImageEngine, error) {
+ r, err := GetRuntime(context.Background(), flags, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &abi.ImageEngine{Libpod: r}, nil
+}
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
new file mode 100644
index 000000000..d59759707
--- /dev/null
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -0,0 +1,328 @@
+package infra
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/pkg/errors"
+ flag "github.com/spf13/pflag"
+)
+
+type engineOpts struct {
+ name string
+ renumber bool
+ migrate bool
+ noStore bool
+ withFDS bool
+ flags entities.EngineOptions
+}
+
+// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers
+func GetRuntimeMigrate(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions, newRuntime string) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ name: newRuntime,
+ renumber: false,
+ migrate: true,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify
+func GetRuntimeDisableFDs(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: false,
+ withFDS: false,
+ flags: ef,
+ })
+}
+
+// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
+func GetRuntimeRenumber(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: true,
+ migrate: false,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntime generates a new libpod runtime configured by command line options
+func GetRuntime(ctx context.Context, flags *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, flags, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntimeNoStore generates a new libpod runtime configured by command line options
+func GetRuntimeNoStore(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: true,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpod.Runtime, error) {
+ options := []libpod.RuntimeOption{}
+ storageOpts := storage.StoreOptions{}
+ storageSet := false
+
+ uidmapFlag := fs.Lookup("uidmap")
+ gidmapFlag := fs.Lookup("gidmap")
+ subuidname := fs.Lookup("subuidname")
+ subgidname := fs.Lookup("subgidname")
+ if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) &&
+ (uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) {
+ userns, _ := fs.GetString("userns")
+ uidmapVal, _ := fs.GetStringSlice("uidmap")
+ gidmapVal, _ := fs.GetStringSlice("gidmap")
+ subuidVal, _ := fs.GetString("subuidname")
+ subgidVal, _ := fs.GetString("subgidname")
+ mappings, err := ParseIDMapping(namespaces.UsernsMode(userns), uidmapVal, gidmapVal, subuidVal, subgidVal)
+ if err != nil {
+ return nil, err
+ }
+ storageOpts.UIDMap = mappings.UIDMap
+ storageOpts.GIDMap = mappings.GIDMap
+
+ storageSet = true
+ }
+
+ if fs.Changed("root") {
+ storageSet = true
+ storageOpts.GraphRoot = opts.flags.Root
+ }
+ if fs.Changed("runroot") {
+ storageSet = true
+ storageOpts.RunRoot = opts.flags.Runroot
+ }
+ if len(storageOpts.RunRoot) > 50 {
+ return nil, errors.New("the specified runroot is longer than 50 characters")
+ }
+ if fs.Changed("storage-driver") {
+ storageSet = true
+ storageOpts.GraphDriverName = opts.flags.StorageDriver
+ // Overriding the default storage driver causes GraphDriverOptions from storage.conf to be ignored
+ storageOpts.GraphDriverOptions = []string{}
+ }
+ // This should always be checked after storage-driver is checked
+ if len(opts.flags.StorageOpts) > 0 {
+ storageSet = true
+ storageOpts.GraphDriverOptions = opts.flags.StorageOpts
+ }
+ if opts.migrate {
+ options = append(options, libpod.WithMigrate())
+ if opts.name != "" {
+ options = append(options, libpod.WithMigrateRuntime(opts.name))
+ }
+ }
+
+ if opts.renumber {
+ options = append(options, libpod.WithRenumber())
+ }
+
+ // Only set this if the user changes storage config on the command line
+ if storageSet {
+ options = append(options, libpod.WithStorageConfig(storageOpts))
+ }
+
+ if !storageSet && opts.noStore {
+ options = append(options, libpod.WithNoStore())
+ }
+ // TODO CLI flags for image config?
+ // TODO CLI flag for signature policy?
+
+ if len(opts.flags.Namespace) > 0 {
+ options = append(options, libpod.WithNamespace(opts.flags.Namespace))
+ }
+
+ if fs.Changed("runtime") {
+ options = append(options, libpod.WithOCIRuntime(opts.flags.Runtime))
+ }
+
+ if fs.Changed("conmon") {
+ options = append(options, libpod.WithConmonPath(opts.flags.ConmonPath))
+ }
+ if fs.Changed("tmpdir") {
+ options = append(options, libpod.WithTmpDir(opts.flags.TmpDir))
+ }
+ if fs.Changed("network-cmd-path") {
+ options = append(options, libpod.WithNetworkCmdPath(opts.flags.NetworkCmdPath))
+ }
+
+ if fs.Changed("events-backend") {
+ options = append(options, libpod.WithEventsLogger(opts.flags.EventsBackend))
+ }
+
+ if fs.Changed("cgroup-manager") {
+ options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager))
+ } else {
+ unified, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ if rootless.IsRootless() && !unified {
+ options = append(options, libpod.WithCgroupManager("cgroupfs"))
+ }
+ }
+
+ // TODO flag to set libpod static dir?
+ // TODO flag to set libpod tmp dir?
+
+ if fs.Changed("cni-config-dir") {
+ options = append(options, libpod.WithCNIConfigDir(opts.flags.CniConfigDir))
+ }
+ if fs.Changed("default-mounts-file") {
+ options = append(options, libpod.WithDefaultMountsFile(opts.flags.DefaultMountsFile))
+ }
+ if fs.Changed("hooks-dir") {
+ options = append(options, libpod.WithHooksDir(opts.flags.HooksDir...))
+ }
+
+ // TODO flag to set CNI plugins dir?
+
+ // TODO I don't think these belong here?
+ // Will follow up with a different PR to address
+ //
+ // Pod create options
+
+ infraImageFlag := fs.Lookup("infra-image")
+ if infraImageFlag != nil && infraImageFlag.Changed {
+ infraImage, _ := fs.GetString("infra-image")
+ options = append(options, libpod.WithDefaultInfraImage(infraImage))
+ }
+
+ infraCommandFlag := fs.Lookup("infra-command")
+ if infraCommandFlag != nil && infraCommandFlag.Changed {
+ infraCommand, _ := fs.GetString("infra-command")
+ options = append(options, libpod.WithDefaultInfraCommand(infraCommand))
+ }
+
+ if !opts.withFDS {
+ options = append(options, libpod.WithEnableSDNotify())
+ }
+ return libpod.NewRuntime(ctx, options...)
+}
+
+// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
+func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
+ options := storage.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+
+ if mode.IsKeepID() {
+ if len(uidMapSlice) > 0 || len(gidMapSlice) > 0 {
+ return nil, errors.New("cannot specify custom mappings with --userns=keep-id")
+ }
+ if len(subUIDMap) > 0 || len(subGIDMap) > 0 {
+ return nil, errors.New("cannot specify subuidmap or subgidmap with --userns=keep-id")
+ }
+ if rootless.IsRootless() {
+ min := func(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+ }
+
+ uid := rootless.GetRootlessUID()
+ gid := rootless.GetRootlessGID()
+
+ uids, gids, err := rootless.GetConfiguredMappings()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot read mappings")
+ }
+ maxUID, maxGID := 0, 0
+ for _, u := range uids {
+ maxUID += u.Size
+ }
+ for _, g := range gids {
+ maxGID += g.Size
+ }
+
+ options.UIDMap, options.GIDMap = nil, nil
+
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
+ if maxUID > uid {
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
+ }
+
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
+ if maxGID > gid {
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
+ }
+
+ options.HostUIDMapping = false
+ options.HostGIDMapping = false
+ }
+ // Simply ignore the setting and do not set up an inner namespace for root, as it is a no-op
+ return &options, nil
+ }
+
+ if subGIDMap == "" && subUIDMap != "" {
+ subGIDMap = subUIDMap
+ }
+ if subUIDMap == "" && subGIDMap != "" {
+ subUIDMap = subGIDMap
+ }
+ if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
+ gidMapSlice = uidMapSlice
+ }
+ if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
+ uidMapSlice = gidMapSlice
+ }
+ if len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+ uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+ }
+ if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+ gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+ }
+
+ if subUIDMap != "" && subGIDMap != "" {
+ mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = mappings.UIDs()
+ options.GIDMap = mappings.GIDs()
+ }
+ parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
+ if err != nil {
+ return nil, err
+ }
+ parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+ options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+ if len(options.UIDMap) > 0 {
+ options.HostUIDMapping = false
+ }
+ if len(options.GIDMap) > 0 {
+ options.HostGIDMapping = false
+ }
+ return &options, nil
+}
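To make the keep-id branch of ParseIDMapping concrete, here is a rough trace for a rootless user with UID and GID 1000 whose configured mappings sum to 65536 IDs (so maxUID = maxGID = 65536); the numbers are illustrative, not verified output:

// ParseIDMapping(namespaces.UsernsMode("keep-id"), nil, nil, "", "") would then
// produce, following the code above:
//
//	UIDMap: {ContainerID: 0,    HostID: 1,    Size: 1000}   // min(uid, maxUID)
//	        {ContainerID: 1000, HostID: 0,    Size: 1}      // the user's own ID
//	        {ContainerID: 1001, HostID: 1001, Size: 64536}  // maxUID - uid
//
// GIDMap is built the same way from gid and maxGID, and both HostUIDMapping
// and HostGIDMapping end up false.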
diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go
new file mode 100644
index 000000000..2e38c74b9
--- /dev/null
+++ b/pkg/domain/infra/runtime_proxy.go
@@ -0,0 +1,21 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
+ flag "github.com/spf13/pflag"
+)
+
+// ContainerEngine Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
+func NewLibpodRuntime(flags *flag.FlagSet, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+ r, err := GetRuntime(context.Background(), flags, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &abi.ContainerEngine{Libpod: r}, nil
+}
diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go
new file mode 100644
index 000000000..dc04b4e53
--- /dev/null
+++ b/pkg/domain/infra/runtime_tunnel.go
@@ -0,0 +1,35 @@
+// +build !ABISupport
+
+package infra
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+func NewContainerEngine(facts entities.EngineOptions) (entities.ContainerEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ return nil, fmt.Errorf("direct runtime not supported")
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(facts entities.EngineOptions) (entities.ImageEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ return nil, fmt.Errorf("direct image runtime not supported")
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
new file mode 100644
index 000000000..3c8be90dc
--- /dev/null
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -0,0 +1,212 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/reference"
+
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ exists, err := containers.Exists(ic.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: exists}, err
+}
+
+func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
+ var (
+ responses []entities.WaitReport
+ )
+ cons, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range cons {
+ response := entities.WaitReport{Id: c.ID}
+ exitCode, err := containers.Wait(ic.ClientCxt, c.ID, &options.Condition)
+ if err != nil {
+ response.Error = err
+ } else {
+ response.ExitCode = exitCode
+ }
+ responses = append(responses, response)
+ }
+ return responses, nil
+}
+
+func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ reports []*entities.PauseUnpauseReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := containers.Pause(ic.ClientCxt, c.ID)
+ reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ reports []*entities.PauseUnpauseReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := containers.Unpause(ic.ClientCxt, c.ID)
+ reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
+ var (
+ reports []*entities.StopReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ report := entities.StopReport{Id: c.ID}
+ report.Err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout)
+ // TODO we need to associate errors returned over http with the common
+ // define errors so the two can be compared in tests; this will also let
+ // the output match the native client
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
+ var (
+ reports []*entities.KillReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ reports = append(reports, &entities.KillReport{
+ Id: c.ID,
+ Err: containers.Kill(ic.ClientCxt, c.ID, options.Signal),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
+ var (
+ reports []*entities.RestartReport
+ timeout *int
+ )
+ if options.Timeout != nil {
+ t := int(*options.Timeout)
+ timeout = &t
+ }
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ reports = append(reports, &entities.RestartReport{
+ Id: c.ID,
+ Err: containers.Restart(ic.ClientCxt, c.ID, timeout),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
+ var (
+ reports []*entities.RmReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ // TODO there is no endpoint for container eviction. Need to discuss
+ for _, c := range ctrs {
+ reports = append(reports, &entities.RmReport{
+ Id: c.ID,
+ Err: containers.Remove(ic.ClientCxt, c.ID, &options.Force, &options.Volumes),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.ContainerInspectOptions) ([]*entities.ContainerInspectReport, error) {
+ var (
+ reports []*entities.ContainerInspectReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ data, err := containers.Inspect(ic.ClientCxt, con.ID, &options.Size)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
+ switch {
+ case options.Latest:
+ return nil, errors.New("latest is not supported")
+ case options.NameOrID == "":
+ return nil, errors.New("NameOrID must be specified")
+ }
+
+ topOutput, err := containers.Top(ic.ClientCxt, options.NameOrID, options.Descriptors)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.StringSliceReport{Value: topOutput}, nil
+}
+
+func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string, options entities.CommitOptions) (*entities.CommitReport, error) {
+ var (
+ repo string
+ tag string = "latest"
+ )
+ if len(options.ImageName) > 0 {
+ ref, err := reference.Parse(options.ImageName)
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := ref.(reference.Tagged); ok {
+ tag = t.Tag()
+ }
+ if r, ok := ref.(reference.Named); ok {
+ repo = r.Name()
+ }
+ if len(repo) < 1 {
+ return nil, errors.Errorf("invalid image name %q", options.ImageName)
+ }
+ }
+ commitOpts := containers.CommitOptions{
+ Author: &options.Author,
+ Changes: options.Changes,
+ Comment: &options.Message,
+ Format: &options.Format,
+ Pause: &options.Pause,
+ Repo: &repo,
+ Tag: &tag,
+ }
+ response, err := containers.Commit(ic.ClientCxt, nameOrId, commitOpts)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.CommitReport{Id: response.ID}, nil
+}
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
new file mode 100644
index 000000000..f9183c955
--- /dev/null
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -0,0 +1,76 @@
+package tunnel
+
+import (
+ "context"
+ "strings"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/bindings/pods"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/pkg/errors"
+)
+
+func getContainersByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]libpod.ListContainer, error) {
+ var (
+ cons []libpod.ListContainer
+ )
+ if all && len(namesOrIds) > 0 {
+ return nil, errors.New("cannot look up specific containers and all containers at the same time")
+ }
+ c, err := containers.List(contextWithConnection, nil, &bindings.PTrue, nil, nil, nil, &bindings.PTrue)
+ if err != nil {
+ return nil, err
+ }
+ if all {
+ return c, err
+ }
+ for _, id := range namesOrIds {
+ var found bool
+ for _, con := range c {
+ if id == con.ID || strings.HasPrefix(con.ID, id) || util.StringInSlice(id, con.Names) {
+ cons = append(cons, con)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, errors.Errorf("unable to find container %q", id)
+ }
+ }
+ return cons, nil
+}
+
+func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]*entities.ListPodsReport, error) {
+ var (
+ sPods []*entities.ListPodsReport
+ )
+ if all && len(namesOrIds) > 0 {
+ return nil, errors.New("cannot look up specific pods and all pods at the same time")
+ }
+
+ fPods, err := pods.List(contextWithConnection, nil)
+ if err != nil {
+ return nil, err
+ }
+ if all {
+ return fPods, nil
+ }
+ for _, nameOrId := range namesOrIds {
+ var found bool
+ for _, f := range fPods {
+ if f.Name == nameOrId || strings.HasPrefix(f.Id, nameOrId) {
+ sPods = append(sPods, f)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrId)
+ }
+ }
+ return sPods, nil
+}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
new file mode 100644
index 000000000..6a3adc9ee
--- /dev/null
+++ b/pkg/domain/infra/tunnel/images.go
@@ -0,0 +1,87 @@
+package tunnel
+
+import (
+ "context"
+
+ images "github.com/containers/libpod/pkg/bindings/images"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
+ found, err := images.Exists(ir.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: found}, err
+}
+
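+// Delete removes the given images on the service side and folds the per-image
+// results into a single report of deleted and untagged image IDs.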
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId []string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+ report := entities.ImageDeleteReport{}
+
+ for _, id := range nameOrId {
+ results, err := images.Remove(ir.ClientCxt, id, &opts.Force)
+ if err != nil {
+ return nil, err
+ }
+ for _, e := range results {
+ if a, ok := e["Deleted"]; ok {
+ report.Deleted = append(report.Deleted, a)
+ }
+
+ if a, ok := e["Untagged"]; ok {
+ report.Untagged = append(report.Untagged, a)
+ }
+ }
+ }
+ return &report, nil
+}
+
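+// List fetches the image listing from the service and deep-copies each entry
+// into an entities.ImageSummary.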
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
+ imgs, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ is := make([]*entities.ImageSummary, len(imgs))
+ for i, img := range imgs {
+ hold := entities.ImageSummary{}
+ if err := utils.DeepCopy(&hold, img); err != nil {
+ return nil, err
+ }
+ is[i] = &hold
+ }
+ return is, nil
+}
+
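+// History returns the layer history of the named image, copied into
+// entities.ImageHistoryLayer values.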
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ results, err := images.History(ir.ClientCxt, nameOrId)
+ if err != nil {
+ return nil, err
+ }
+
+ history := entities.ImageHistoryReport{
+ Layers: make([]entities.ImageHistoryLayer, len(results)),
+ }
+
+ for i, layer := range results {
+ hold := entities.ImageHistoryLayer{}
+ if err := utils.DeepCopy(&hold, layer); err != nil {
+ return nil, err
+ }
+ history.Layers[i] = hold
+ }
+ return &history, nil
+}
+
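+// Prune asks the service to remove unused images, optionally narrowed by
+// filters, and wraps the returned image IDs in an ImagePruneReport.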
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+ results, err := images.Prune(ir.ClientCxt, &opts.All, opts.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ report := entities.ImagePruneReport{
+ Report: entities.Report{
+ Id: results,
+ Err: nil,
+ },
+ Size: 0,
+ }
+ return &report, nil
+}
diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go
new file mode 100644
index 000000000..4894874e5
--- /dev/null
+++ b/pkg/domain/infra/tunnel/pods.go
@@ -0,0 +1,179 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/pods"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/specgen"
+)
+
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ exists, err := pods.Exists(ic.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: exists}, err
+}
+
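+// PodKill sends options.Signal to the selected pods (all pods when
+// options.All is set); failures are recorded per pod instead of aborting the
+// whole operation.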
+func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
+ var (
+ reports []*entities.PodKillReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Kill(ic.ClientCxt, p.Id, &options.Signal)
+ if err != nil {
+ report := entities.PodKillReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
+ var (
+ reports []*entities.PodPauseReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Pause(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodPauseReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
+ var (
+ reports []*entities.PodUnpauseReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Unpause(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodUnpauseReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
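+// PodStop stops the selected pods. options.Timeout is forwarded to the
+// service; a value of -1 means no explicit timeout was requested.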
+func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
+ var (
+ reports []*entities.PodStopReport
+ timeout int = -1
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ if options.Timeout != -1 {
+ timeout = options.Timeout
+ }
+ for _, p := range foundPods {
+ response, err := pods.Stop(ic.ClientCxt, p.Id, &timeout)
+ if err != nil {
+ report := entities.PodStopReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
+ var reports []*entities.PodRestartReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Restart(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodRestartReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
+ var reports []*entities.PodStartReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Start(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodStartReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
+ var reports []*entities.PodRmReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Remove(ic.ClientCxt, p.Id, &options.Force)
+ if err != nil {
+ report := entities.PodRmReport{
+ Err: err,
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
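+// PodCreate translates the CLI-level pod options into a pod spec and submits
+// it to the service.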
+func (ic *ContainerEngine) PodCreate(ctx context.Context, opts entities.PodCreateOptions) (*entities.PodCreateReport, error) {
+ podSpec := specgen.NewPodSpecGenerator()
+ opts.ToPodSpecGen(podSpec)
+ return pods.CreatePodFromSpec(ic.ClientCxt, podSpec)
+}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
new file mode 100644
index 000000000..c111f99e9
--- /dev/null
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -0,0 +1,15 @@
+package tunnel
+
+import (
+ "context"
+)
+
+// ImageEngine is the image-related runtime for the remote (tunnel) client;
+// it drives a Podman service over the client connection, e.g. an ssh tunnel.
+type ImageEngine struct {
+ ClientCxt context.Context
+}
+
+// ContainerEngine is the container-related runtime for the remote (tunnel)
+// client; it drives a Podman service over the same client connection.
+type ContainerEngine struct {
+ ClientCxt context.Context
+}
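+
+// NOTE: a rough usage sketch, not part of the original change. It assumes the
+// pkg/bindings package provides a NewConnection helper that returns a context
+// carrying the client connection (the exact name and signature may differ);
+// the socket URI and the image/pod names below are placeholders.
+//
+//      connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
+//      if err != nil {
+//              // handle the connection error
+//      }
+//      ie := ImageEngine{ClientCxt: connCtx}
+//      ce := ContainerEngine{ClientCxt: connCtx}
+//      imgExists, _ := ie.Exists(context.Background(), "alpine")
+//      podExists, _ := ce.PodExists(context.Background(), "mypod")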
diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go
new file mode 100644
index 000000000..e48a7fa7c
--- /dev/null
+++ b/pkg/domain/infra/tunnel/volumes.go
@@ -0,0 +1,70 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/volumes"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+ response, err := volumes.Create(ic.ClientCxt, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.IdOrNameResponse{IdOrName: response.Name}, nil
+}
+
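+// VolumeRm removes the named volumes; when opts.All is set the volume list is
+// fetched first so that every volume is removed. Errors are reported per
+// volume.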
+func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
+ var (
+ reports []*entities.VolumeRmReport
+ )
+
+ if opts.All {
+ vols, err := volumes.List(ic.ClientCxt, nil)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ namesOrIds = append(namesOrIds, v.Name)
+ }
+ }
+ for _, id := range namesOrIds {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: volumes.Remove(ic.ClientCxt, id, &opts.Force),
+ Id: id,
+ })
+ }
+ return reports, nil
+}
+
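+// VolumeInspect inspects each named volume; with opts.All every volume known
+// to the service is inspected.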
+func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
+ var (
+ reports []*entities.VolumeInspectReport
+ )
+ if opts.All {
+ vols, err := volumes.List(ic.ClientCxt, nil)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ namesOrIds = append(namesOrIds, v.Name)
+ }
+ }
+ for _, id := range namesOrIds {
+ data, err := volumes.Inspect(ic.ClientCxt, id)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) {
+ return volumes.Prune(ic.ClientCxt)
+}
+
+func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) {
+ return volumes.List(ic.ClientCxt, opts.Filter)
+}