Diffstat (limited to 'pkg/domain')
-rw-r--r--  pkg/domain/entities/containers.go        | 108
-rw-r--r--  pkg/domain/entities/engine.go            |  92
-rw-r--r--  pkg/domain/entities/engine_container.go  |  33
-rw-r--r--  pkg/domain/entities/engine_image.go      |  13
-rw-r--r--  pkg/domain/entities/filters.go           | 150
-rw-r--r--  pkg/domain/entities/images.go            | 139
-rw-r--r--  pkg/domain/entities/pods.go              | 143
-rw-r--r--  pkg/domain/entities/set.go               |  45
-rw-r--r--  pkg/domain/entities/types.go             |  44
-rw-r--r--  pkg/domain/entities/volumes.go           |  92
-rw-r--r--  pkg/domain/filters/volumes.go            |  70
-rw-r--r--  pkg/domain/infra/abi/containers.go       | 279
-rw-r--r--  pkg/domain/infra/abi/images.go           | 166
-rw-r--r--  pkg/domain/infra/abi/images_list.go      |  80
-rw-r--r--  pkg/domain/infra/abi/images_test.go      |  37
-rw-r--r--  pkg/domain/infra/abi/parse/parse.go      |  68
-rw-r--r--  pkg/domain/infra/abi/pods.go             | 252
-rw-r--r--  pkg/domain/infra/abi/runtime.go          |  17
-rw-r--r--  pkg/domain/infra/abi/volumes.go          | 159
-rw-r--r--  pkg/domain/infra/runtime_abi.go          |  38
-rw-r--r--  pkg/domain/infra/runtime_image_proxy.go  |  21
-rw-r--r--  pkg/domain/infra/runtime_libpod.go       | 328
-rw-r--r--  pkg/domain/infra/runtime_proxy.go        |  21
-rw-r--r--  pkg/domain/infra/runtime_tunnel.go       |  35
-rw-r--r--  pkg/domain/infra/tunnel/containers.go    | 174
-rw-r--r--  pkg/domain/infra/tunnel/helpers.go       |  76
-rw-r--r--  pkg/domain/infra/tunnel/images.go        |  87
-rw-r--r--  pkg/domain/infra/tunnel/pods.go          | 179
-rw-r--r--  pkg/domain/infra/tunnel/runtime.go       |  15
-rw-r--r--  pkg/domain/infra/tunnel/volumes.go       |  70
-rw-r--r--  pkg/domain/utils/utils.go                |  41
31 files changed, 3072 insertions(+), 0 deletions(-)
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
new file mode 100644
index 000000000..fbc0247ab
--- /dev/null
+++ b/pkg/domain/entities/containers.go
@@ -0,0 +1,108 @@
+package entities
+
+import (
+ "time"
+
+ "github.com/containers/libpod/libpod/define"
+)
+
+type WaitOptions struct {
+ Condition define.ContainerStatus
+ Interval time.Duration
+ Latest bool
+}
+
+type WaitReport struct {
+ Id string
+ Error error
+ ExitCode int32
+}
+
+type BoolReport struct {
+ Value bool
+}
+
+// StringSliceReport wraps a string slice.
+type StringSliceReport struct {
+ Value []string
+}
+
+type PauseUnPauseOptions struct {
+ All bool
+}
+
+type PauseUnpauseReport struct {
+ Err error
+ Id string
+}
+
+type StopOptions struct {
+ All bool
+ CIDFiles []string
+ Ignore bool
+ Latest bool
+ Timeout uint
+}
+
+type StopReport struct {
+ Err error
+ Id string
+}
+
+type TopOptions struct {
+ // CLI flags.
+ ListDescriptors bool
+ Latest bool
+
+ // Options for the API.
+ Descriptors []string
+ NameOrID string
+}
+
+type KillOptions struct {
+ All bool
+ Latest bool
+ Signal string
+}
+
+type KillReport struct {
+ Err error
+ Id string
+}
+
+type RestartOptions struct {
+ All bool
+ Latest bool
+ Running bool
+ Timeout *uint
+}
+
+type RestartReport struct {
+ Err error
+ Id string
+}
+
+type RmOptions struct {
+ All bool
+ CIDFiles []string
+ Force bool
+ Ignore bool
+ Latest bool
+ Storage bool
+ Volumes bool
+}
+
+type RmReport struct {
+ Err error
+ Id string
+}
+
+type ContainerInspectOptions struct {
+ Format string
+ Latest bool
+ Size bool
+}
+
+type ContainerInspectReport struct {
+ *define.InspectContainerData
+}
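
As a rough usage sketch, the snippet below shows how a front end might drive these wait types through the ContainerEngine interface introduced in engine_container.go further down. The helper name, the engine value, and the define.ContainerStateStopped condition are illustrative assumptions, not part of this patch.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/libpod/libpod/define"
	"github.com/containers/libpod/pkg/domain/entities"
)

// waitAll waits for each named container to reach the stopped state and
// prints one line per WaitReport returned by the engine.
func waitAll(ctx context.Context, engine entities.ContainerEngine, names []string) error {
	opts := entities.WaitOptions{
		Condition: define.ContainerStateStopped, // assumes this constant from libpod/define
		Interval:  250 * time.Millisecond,
	}
	reports, err := engine.ContainerWait(ctx, names, opts)
	if err != nil {
		return err
	}
	for _, r := range reports {
		if r.Error != nil {
			fmt.Printf("%s: %v\n", r.Id, r.Error)
			continue
		}
		fmt.Printf("%s exited with code %d\n", r.Id, r.ExitCode)
	}
	return nil
}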
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
new file mode 100644
index 000000000..c14348529
--- /dev/null
+++ b/pkg/domain/entities/engine.go
@@ -0,0 +1,92 @@
+package entities
+
+import (
+ "os/user"
+ "path/filepath"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/spf13/pflag"
+)
+
+type EngineMode string
+
+const (
+ ABIMode = EngineMode("abi")
+ TunnelMode = EngineMode("tunnel")
+)
+
+func (m EngineMode) String() string {
+ return string(m)
+}
+
+type EngineOptions struct {
+ Uri string
+ Identities []string
+ FlagSet *pflag.FlagSet
+ EngineMode EngineMode
+
+ CGroupManager string
+ CniConfigDir string
+ ConmonPath string
+ DefaultMountsFile string
+ EventsBackend string
+ HooksDir []string
+ MaxWorks int
+ Namespace string
+ Root string
+ Runroot string
+ Runtime string
+ StorageDriver string
+ StorageOpts []string
+ Syslog bool
+ Trace bool
+ NetworkCmdPath string
+
+ Config string
+ CpuProfile string
+ LogLevel string
+ TmpDir string
+
+ RemoteUserName string
+ RemoteHost string
+ VarlinkAddress string
+ ConnectionName string
+ RemoteConfigFilePath string
+ Port int
+ IdentityFile string
+ IgnoreHosts bool
+}
+
+func NewEngineOptions() (EngineOptions, error) {
+ u, err := user.Current()
+ if err != nil {
+ return EngineOptions{}, err
+ }
+ return EngineOptions{
+ CGroupManager: config.SystemdCgroupsManager,
+ CniConfigDir: "",
+ Config: "",
+ ConmonPath: filepath.Join("usr", "bin", "conmon"),
+ ConnectionName: "",
+ CpuProfile: "",
+ DefaultMountsFile: "",
+ EventsBackend: "",
+ HooksDir: nil,
+ IdentityFile: "",
+ IgnoreHosts: false,
+ LogLevel: "",
+ MaxWorks: 0,
+ Namespace: "",
+ NetworkCmdPath: "",
+ Port: 0,
+ RemoteConfigFilePath: "",
+ RemoteHost: "",
+ RemoteUserName: "",
+ Root: "",
+ Runroot: filepath.Join("run", "user", u.Uid),
+ Runtime: "",
+ StorageDriver: "overlayfs",
+ StorageOpts: nil,
+ Syslog: false,
+ TmpDir: filepath.Join("run", "user", u.Uid, "libpod", "tmp"),
+ Trace: false,
+ VarlinkAddress: "",
+ }, nil
+}
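
A minimal sketch of how a caller might seed these defaults and pick an engine mode before flag parsing overrides them; the helper name is hypothetical.

package example

import (
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// defaultOptions builds EngineOptions from the package defaults and selects
// the ABI (in-process libpod) engine mode.
func defaultOptions() (entities.EngineOptions, error) {
	opts, err := entities.NewEngineOptions()
	if err != nil {
		return entities.EngineOptions{}, err
	}
	opts.EngineMode = entities.ABIMode
	fmt.Println("runroot:", opts.Runroot, "mode:", opts.EngineMode.String())
	return opts, nil
}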
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
new file mode 100644
index 000000000..fceed1003
--- /dev/null
+++ b/pkg/domain/entities/engine_container.go
@@ -0,0 +1,33 @@
+package entities
+
+import (
+ "context"
+)
+
+type ContainerEngine interface {
+ ContainerExists(ctx context.Context, nameOrId string) (*BoolReport, error)
+ ContainerInspect(ctx context.Context, namesOrIds []string, options ContainerInspectOptions) ([]*ContainerInspectReport, error)
+ ContainerKill(ctx context.Context, namesOrIds []string, options KillOptions) ([]*KillReport, error)
+ ContainerPause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error)
+ ContainerRestart(ctx context.Context, namesOrIds []string, options RestartOptions) ([]*RestartReport, error)
+ ContainerRm(ctx context.Context, namesOrIds []string, options RmOptions) ([]*RmReport, error)
+ ContainerUnpause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error)
+ ContainerStop(ctx context.Context, namesOrIds []string, options StopOptions) ([]*StopReport, error)
+ ContainerWait(ctx context.Context, namesOrIds []string, options WaitOptions) ([]WaitReport, error)
+ ContainerTop(ctx context.Context, options TopOptions) (*StringSliceReport, error)
+ PodCreate(ctx context.Context, opts PodCreateOptions) (*PodCreateReport, error)
+ PodExists(ctx context.Context, nameOrId string) (*BoolReport, error)
+ PodKill(ctx context.Context, namesOrIds []string, options PodKillOptions) ([]*PodKillReport, error)
+ PodPause(ctx context.Context, namesOrIds []string, options PodPauseOptions) ([]*PodPauseReport, error)
+ PodRestart(ctx context.Context, namesOrIds []string, options PodRestartOptions) ([]*PodRestartReport, error)
+ PodStart(ctx context.Context, namesOrIds []string, options PodStartOptions) ([]*PodStartReport, error)
+ PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error)
+ PodRm(ctx context.Context, namesOrIds []string, options PodRmOptions) ([]*PodRmReport, error)
+ PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error)
+
+ VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IdOrNameResponse, error)
+ VolumeInspect(ctx context.Context, namesOrIds []string, opts VolumeInspectOptions) ([]*VolumeInspectReport, error)
+ VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error)
+ VolumePrune(ctx context.Context, opts VolumePruneOptions) ([]*VolumePruneReport, error)
+ VolumeList(ctx context.Context, opts VolumeListOptions) ([]*VolumeListReport, error)
+}
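
Since callers program against this interface rather than a concrete runtime, the same code can run over the in-process ABI backend or the remote tunnel backend added later in this patch. A small, hypothetical sketch; the engine value is assumed to come from one of the infra factories below.

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// removeIfPresent works with any ContainerEngine implementation: it checks
// whether a container exists and force-removes it if so.
func removeIfPresent(ctx context.Context, engine entities.ContainerEngine, name string) error {
	exists, err := engine.ContainerExists(ctx, name)
	if err != nil {
		return err
	}
	if !exists.Value {
		return nil
	}
	reports, err := engine.ContainerRm(ctx, []string{name}, entities.RmOptions{Force: true})
	if err != nil {
		return err
	}
	for _, r := range reports {
		if r.Err != nil {
			return fmt.Errorf("removing %s: %w", r.Id, r.Err)
		}
	}
	return nil
}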
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
new file mode 100644
index 000000000..d0c860a04
--- /dev/null
+++ b/pkg/domain/entities/engine_image.go
@@ -0,0 +1,13 @@
+package entities
+
+import (
+ "context"
+)
+
+type ImageEngine interface {
+ Delete(ctx context.Context, nameOrId []string, opts ImageDeleteOptions) (*ImageDeleteReport, error)
+ Exists(ctx context.Context, nameOrId string) (*BoolReport, error)
+ History(ctx context.Context, nameOrId string, opts ImageHistoryOptions) (*ImageHistoryReport, error)
+ List(ctx context.Context, opts ImageListOptions) ([]*ImageSummary, error)
+ Prune(ctx context.Context, opts ImagePruneOptions) (*ImagePruneReport, error)
+}
diff --git a/pkg/domain/entities/filters.go b/pkg/domain/entities/filters.go
new file mode 100644
index 000000000..c7e227244
--- /dev/null
+++ b/pkg/domain/entities/filters.go
@@ -0,0 +1,150 @@
+package entities
+
+import (
+ "net/url"
+ "strings"
+)
+
+// Identifier interface allows filters to access ID() of object
+type Identifier interface {
+ Id() string
+}
+
+// Named interface allows filters to access Name() of object
+type Named interface {
+ Name() string
+}
+
+// Names interface allows filters to access Names() of object
+type Names interface {
+ Names() []string
+}
+
+// IdOrNamed interface allows filters to access Id() or Name() of object
+type IdOrNamed interface {
+ Identifier
+ Named
+}
+
+// IdOrNames interface allows filters to access Id() or Names() of object
+type IdOrNames interface {
+ Identifier
+ Names
+}
+
+type ImageFilter func(Image) bool
+type VolumeFilter func(Volume) bool
+type ContainerFilter func(Container) bool
+
+func CompileImageFilters(filters url.Values) ImageFilter {
+ var fns []interface{}
+
+ for name, targets := range filters {
+ switch name {
+ case "id":
+ fns = append(fns, FilterIdFn(targets))
+ case "name":
+ fns = append(fns, FilterNamesFn(targets))
+ case "idOrName":
+ fns = append(fns, FilterIdOrNameFn(targets))
+ }
+ }
+
+ return func(image Image) bool {
+ for _, fn := range fns {
+ if !fn.(ImageFilter)(image) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func CompileContainerFilters(filters url.Values) ContainerFilter {
+ var fns []interface{}
+
+ for name, targets := range filters {
+ switch name {
+ case "id":
+ fns = append(fns, FilterIdFn(targets))
+ case "name":
+ fns = append(fns, FilterNameFn(targets))
+ case "idOrName":
+ fns = append(fns, FilterIdOrNameFn(targets))
+ }
+ }
+
+ return func(ctnr Container) bool {
+ for _, fn := range fns {
+ if !fn.(ContainerFilter)(ctnr) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func CompileVolumeFilters(filters url.Values) VolumeFilter {
+ var fns []interface{}
+
+ for name, targets := range filters {
+ if name == "id" {
+ fns = append(fns, FilterIdFn(targets))
+ }
+ }
+
+ return func(volume Volume) bool {
+ for _, fn := range fns {
+ if !fn.(VolumeFilter)(volume) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func FilterIdFn(id []string) func(Identifier) bool {
+ return func(obj Identifier) bool {
+ for _, v := range id {
+ if strings.Contains(obj.Id(), v) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func FilterNameFn(name []string) func(Named) bool {
+ return func(obj Named) bool {
+ for _, v := range name {
+ if strings.Contains(obj.Name(), v) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func FilterNamesFn(name []string) func(Names) bool {
+ return func(obj Names) bool {
+ for _, v := range name {
+ for _, n := range obj.Names() {
+ if strings.Contains(n, v) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+}
+
+func FilterIdOrNameFn(id []string) func(IdOrNamed) bool {
+ return func(obj IdOrNamed) bool {
+ for _, v := range id {
+ if strings.Contains(obj.Id(), v) || strings.Contains(obj.Name(), v) {
+ return true
+ }
+ }
+ return false
+ }
+}
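
A tiny sketch of the per-field predicates above, using a hypothetical type that satisfies the Identifier interface; only the helper and type names are invented here.

package example

import (
	"github.com/containers/libpod/pkg/domain/entities"
)

// ctr is a hypothetical type used only to demonstrate the Identifier contract.
type ctr struct{ id string }

func (c ctr) Id() string { return c.id }

// matchAnyID reports whether the container's ID matches any of the given
// (possibly partial) IDs, using the FilterIdFn predicate defined above.
func matchAnyID(c ctr, ids []string) bool {
	return entities.FilterIdFn(ids)(c)
}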
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
new file mode 100644
index 000000000..20af0356f
--- /dev/null
+++ b/pkg/domain/entities/images.go
@@ -0,0 +1,139 @@
+package entities
+
+import (
+ "net/url"
+
+ "github.com/containers/image/v5/manifest"
+ docker "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type Image struct {
+ IdOrNamed
+ ID string `json:"Id"`
+ RepoTags []string `json:",omitempty"`
+ RepoDigests []string `json:",omitempty"`
+ Parent string `json:",omitempty"`
+ Comment string `json:",omitempty"`
+ Created string `json:",omitempty"`
+ Container string `json:",omitempty"`
+ ContainerConfig *container.Config `json:",omitempty"`
+ DockerVersion string `json:",omitempty"`
+ Author string `json:",omitempty"`
+ Config *container.Config `json:",omitempty"`
+ Architecture string `json:",omitempty"`
+ Variant string `json:",omitempty"`
+ Os string `json:",omitempty"`
+ OsVersion string `json:",omitempty"`
+ Size int64 `json:",omitempty"`
+ VirtualSize int64 `json:",omitempty"`
+ GraphDriver docker.GraphDriverData `json:",omitempty"`
+ RootFS docker.RootFS `json:",omitempty"`
+ Metadata docker.ImageMetadata `json:",omitempty"`
+
+ // Podman extensions
+ Digest digest.Digest `json:",omitempty"`
+ PodmanVersion string `json:",omitempty"`
+ ManifestType string `json:",omitempty"`
+ User string `json:",omitempty"`
+ History []v1.History `json:",omitempty"`
+ NamesHistory []string `json:",omitempty"`
+ HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"`
+}
+
+func (i *Image) Id() string {
+ return i.ID
+}
+
+type ImageSummary struct {
+ ID string `json:"Id"`
+ ParentId string `json:",omitempty"`
+ RepoTags []string `json:",omitempty"`
+ Created int64 `json:",omitempty"`
+ Size int64 `json:",omitempty"`
+ SharedSize int `json:",omitempty"`
+ VirtualSize int64 `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Containers int `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Dangling bool `json:",omitempty"`
+
+ // Podman extensions
+ Names []string `json:",omitempty"`
+ Digest string `json:",omitempty"`
+ Digests []string `json:",omitempty"`
+ ConfigDigest string `json:",omitempty"`
+ History []string `json:",omitempty"`
+}
+
+func (i *ImageSummary) Id() string {
+ return i.ID
+}
+
+func (i *ImageSummary) IsReadOnly() bool {
+ return i.ReadOnly
+}
+
+func (i *ImageSummary) IsDangling() bool {
+ return i.Dangling
+}
+
+type ImageDeleteOptions struct {
+ All bool
+ Force bool
+}
+
+// ImageDeleteReport is the report returned when removing one or more images
+// from storage; it records what was untagged versus what was actually deleted.
+type ImageDeleteReport struct {
+ Untagged []string `json:",omitempty"`
+ Deleted []string `json:",omitempty"`
+ Errors []error
+ ImageNotFound error
+ ImageInUse error
+}
+
+type ImageHistoryOptions struct{}
+
+type ImageHistoryLayer struct {
+ ID string `json:"Id"`
+ Created int64 `json:",omitempty"`
+ CreatedBy string `json:",omitempty"`
+ Tags []string `json:",omitempty"`
+ Size int64 `json:",omitempty"`
+ Comment string `json:",omitempty"`
+}
+
+type ImageHistoryReport struct {
+ Layers []ImageHistoryLayer
+}
+
+type ImageInspectOptions struct {
+ TypeObject string `json:",omitempty"`
+ Format string `json:",omitempty"`
+ Size bool `json:",omitempty"`
+ Latest bool `json:",omitempty"`
+}
+
+type ImageListOptions struct {
+ All bool `json:"all" schema:"all"`
+ Filter []string `json:"Filter,omitempty"`
+ Filters url.Values `json:"filters" schema:"filters"`
+}
+
+// type ImageListReport struct {
+// Images []ImageSummary
+// }
+
+type ImagePruneOptions struct {
+ All bool `json:"all" schema:"all"`
+ Filter []string `json:"filter" schema:"filter"`
+ Filters url.Values `json:"filters" schema:"filters"`
+}
+
+type ImagePruneReport struct {
+ Report Report
+ Size int64
+}
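
A brief, hypothetical sketch of how these list types pair with the ImageEngine interface; the dangling=true filter value is an assumed example.

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// printImages lists images through any ImageEngine implementation, filtering
// for dangling ones, and prints their IDs and names.
func printImages(ctx context.Context, engine entities.ImageEngine) error {
	summaries, err := engine.List(ctx, entities.ImageListOptions{
		All:    true,
		Filter: []string{"dangling=true"}, // assumed filter syntax
	})
	if err != nil {
		return err
	}
	for _, s := range summaries {
		fmt.Println(s.Id(), s.Names)
	}
	return nil
}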
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
new file mode 100644
index 000000000..efda17d65
--- /dev/null
+++ b/pkg/domain/entities/pods.go
@@ -0,0 +1,143 @@
+package entities
+
+import (
+ "time"
+
+ "github.com/containers/libpod/pkg/specgen"
+)
+
+type PodKillOptions struct {
+ All bool
+ Latest bool
+ Signal string
+}
+
+type PodKillReport struct {
+ Errs []error
+ Id string
+}
+
+type ListPodsReport struct {
+ Cgroup string
+ Containers []*ListPodContainer
+ Created time.Time
+ Id string
+ Name string
+ Namespace string
+ Status string
+}
+
+type ListPodContainer struct {
+ Id string
+ Names string
+ Status string
+}
+
+type PodPauseOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodPauseReport struct {
+ Errs []error
+ Id string
+}
+
+type PodunpauseOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodUnpauseReport struct {
+ Errs []error
+ Id string
+}
+
+type PodStopOptions struct {
+ All bool
+ Ignore bool
+ Latest bool
+ Timeout int
+}
+
+type PodStopReport struct {
+ Errs []error
+ Id string
+}
+
+type PodRestartOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodRestartReport struct {
+ Errs []error
+ Id string
+}
+
+type PodStartOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodStartReport struct {
+ Errs []error
+ Id string
+}
+
+type PodRmOptions struct {
+ All bool
+ Force bool
+ Ignore bool
+ Latest bool
+}
+
+type PodRmReport struct {
+ Err error
+ Id string
+}
+
+type PodCreateOptions struct {
+ CGroupParent string
+ Hostname string
+ Infra bool
+ InfraImage string
+ InfraCommand string
+ Labels map[string]string
+ Name string
+ Net *NetOptions
+ Share []string
+}
+
+type PodCreateReport struct {
+ Id string
+}
+
+func (p PodCreateOptions) ToPodSpecGen(s *specgen.PodSpecGenerator) {
+ // Basic Config
+ s.Name = p.Name
+ s.Hostname = p.Hostname
+ s.Labels = p.Labels
+ s.NoInfra = !p.Infra
+ s.InfraCommand = []string{p.InfraCommand}
+ s.InfraImage = p.InfraImage
+ s.SharedNamespaces = p.Share
+
+ // Networking config
+ s.NetNS = p.Net.Network
+ s.StaticIP = p.Net.StaticIP
+ s.StaticMAC = p.Net.StaticMAC
+ s.PortMappings = p.Net.PublishPorts
+ s.CNINetworks = p.Net.CNINetworks
+ if p.Net.DNSHost {
+ s.NoManageResolvConf = true
+ }
+ s.DNSServer = p.Net.DNSServers
+ s.DNSSearch = p.Net.DNSSearch
+ s.DNSOption = p.Net.DNSOptions
+ s.NoManageHosts = p.Net.NoHosts
+ s.HostAdd = p.Net.AddHosts
+
+ // Cgroup
+ s.CgroupParent = p.CGroupParent
+}
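
ToPodSpecGen reads the Net field unconditionally, so callers must set it even when no networking options are given. A hedged sketch; the image, command, and share values are assumed examples.

package example

import (
	"github.com/containers/libpod/pkg/domain/entities"
	"github.com/containers/libpod/pkg/specgen"
)

// buildPodSpec shows how the CLI-facing PodCreateOptions are projected onto a
// specgen.PodSpecGenerator before MakePod is called.
func buildPodSpec() *specgen.PodSpecGenerator {
	opts := entities.PodCreateOptions{
		Name:         "demo",
		Hostname:     "demo-host",
		Infra:        true,
		InfraImage:   "k8s.gcr.io/pause:3.1", // assumed example image
		InfraCommand: "/pause",               // assumed example command
		Share:        []string{"net", "ipc"},
		Net:          &entities.NetOptions{}, // must be non-nil
	}
	spec := specgen.NewPodSpecGenerator()
	opts.ToPodSpecGen(spec)
	return spec
}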
diff --git a/pkg/domain/entities/set.go b/pkg/domain/entities/set.go
new file mode 100644
index 000000000..c8d6cb1a9
--- /dev/null
+++ b/pkg/domain/entities/set.go
@@ -0,0 +1,45 @@
+package entities
+
+import (
+ "strings"
+)
+
+type stringSet struct {
+ m map[string]struct{}
+}
+
+func NewStringSet(elem ...string) *stringSet {
+ s := &stringSet{}
+ s.m = make(map[string]struct{}, len(elem))
+ for _, e := range elem {
+ s.Add(e)
+ }
+ return s
+}
+
+func (s *stringSet) Add(elem string) {
+ s.m[elem] = struct{}{}
+}
+
+func (s *stringSet) Remove(elem string) {
+ delete(s.m, elem)
+}
+
+func (s *stringSet) Contains(elem string) bool {
+ _, ok := s.m[elem]
+ return ok
+}
+
+func (s *stringSet) Elements() []string {
+ keys := make([]string, len(s.m))
+ i := 0
+ for k := range s.m {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+func (s *stringSet) String() string {
+ return strings.Join(s.Elements(), ", ")
+}
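
A quick illustration of the set helper; the concrete type is unexported, but the constructor and methods are usable from other packages.

package example

import (
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// demoSet deduplicates a few names and tests membership.
func demoSet() {
	s := entities.NewStringSet("alpine", "busybox", "alpine")
	s.Add("fedora")
	s.Remove("busybox")
	fmt.Println(s.Contains("alpine")) // true
	fmt.Println(len(s.Elements()))    // 2 ("alpine", "fedora")
}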
diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go
new file mode 100644
index 000000000..a1a729584
--- /dev/null
+++ b/pkg/domain/entities/types.go
@@ -0,0 +1,44 @@
+package entities
+
+import (
+ "net"
+
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+)
+
+type Container struct {
+ IdOrNamed
+}
+
+type Volume struct {
+ Identifier
+}
+
+type Report struct {
+ Id []string
+ Err map[string]error
+}
+
+type PodDeleteReport struct{ Report }
+type PodPruneOptions struct{}
+
+type PodPruneReport struct{ Report }
+type VolumeDeleteOptions struct{}
+type VolumeDeleteReport struct{ Report }
+
+// NetOptions reflect the shared network options between
+// pods and containers
+type NetOptions struct {
+ AddHosts []string
+ CNINetworks []string
+ DNSHost bool
+ DNSOptions []string
+ DNSSearch []string
+ DNSServers []net.IP
+ Network specgen.Namespace
+ NoHosts bool
+ PublishPorts []ocicni.PortMapping
+ StaticIP *net.IP
+ StaticMAC *net.HardwareAddr
+}
diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go
new file mode 100644
index 000000000..23c066083
--- /dev/null
+++ b/pkg/domain/entities/volumes.go
@@ -0,0 +1,92 @@
+package entities
+
+import (
+ "time"
+)
+
+// swagger:model VolumeCreate
+type VolumeCreateOptions struct {
+ // New volume's name. Can be left blank
+ Name string `schema:"name"`
+ // Volume driver to use
+ Driver string `schema:"driver"`
+ // User-defined key/value metadata.
+ Label map[string]string `schema:"label"`
+ // Mapping of driver options and values.
+ Options map[string]string `schema:"opts"`
+}
+
+type IdOrNameResponse struct {
+ // The Id or Name of an object
+ IdOrName string
+}
+
+type VolumeConfigResponse struct {
+ // Name is the name of the volume.
+ Name string `json:"Name"`
+ // Driver is the driver used to create the volume.
+ // This will be properly implemented in a future version.
+ Driver string `json:"Driver"`
+ // Mountpoint is the path on the host where the volume is mounted.
+ Mountpoint string `json:"Mountpoint"`
+ // CreatedAt is the date and time the volume was created at. This is not
+ // stored for older Libpod volumes; if so, it will be omitted.
+ CreatedAt time.Time `json:"CreatedAt,omitempty"`
+ // Status is presently unused and provided only for Docker compatibility.
+ // In the future it will be used to return information on the volume's
+ // current state.
+ Status map[string]string `json:"Status,omitempty"`
+ // Labels includes the volume's configured labels, key:value pairs that
+ // can be passed during volume creation to provide information for third
+ // party tools.
+ Labels map[string]string `json:"Labels"`
+ // Scope is unused and provided solely for Docker compatibility. It is
+ // unconditionally set to "local".
+ Scope string `json:"Scope"`
+ // Options is a set of options that were used when creating the volume.
+ // It is presently not used.
+ Options map[string]string `json:"Options"`
+ // UID is the UID that the volume was created with.
+ UID int `json:"UID,omitempty"`
+ // GID is the GID that the volume was created with.
+ GID int `json:"GID,omitempty"`
+ // Anonymous indicates that the volume was created as an anonymous
+ // volume for a specific container, and will be be removed when any
+ // container using it is removed.
+ Anonymous bool `json:"Anonymous,omitempty"`
+}
+
+type VolumeRmOptions struct {
+ All bool
+ Force bool
+}
+
+type VolumeRmReport struct {
+ Err error
+ Id string
+}
+
+type VolumeInspectOptions struct {
+ All bool
+}
+
+type VolumeInspectReport struct {
+ *VolumeConfigResponse
+}
+
+type VolumePruneOptions struct {
+ Force bool
+}
+
+type VolumePruneReport struct {
+ Err error
+ Id string
+}
+
+type VolumeListOptions struct {
+ Filter map[string][]string
+}
+
+type VolumeListReport struct {
+ VolumeConfigResponse
+}
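
A hypothetical sketch of creating and then inspecting a volume through the ContainerEngine volume methods declared earlier; the name and label are assumed example values.

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// createAndInspect creates a labeled volume and then inspects it by the name
// returned in the IdOrNameResponse.
func createAndInspect(ctx context.Context, engine entities.ContainerEngine) error {
	created, err := engine.VolumeCreate(ctx, entities.VolumeCreateOptions{
		Name:  "data",
		Label: map[string]string{"app": "demo"},
	})
	if err != nil {
		return err
	}
	reports, err := engine.VolumeInspect(ctx, []string{created.IdOrName}, entities.VolumeInspectOptions{})
	if err != nil {
		return err
	}
	for _, r := range reports {
		fmt.Println(r.Name, r.Mountpoint, r.Labels)
	}
	return nil
}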
diff --git a/pkg/domain/filters/volumes.go b/pkg/domain/filters/volumes.go
new file mode 100644
index 000000000..f97c3f570
--- /dev/null
+++ b/pkg/domain/filters/volumes.go
@@ -0,0 +1,70 @@
+package filters
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/pkg/errors"
+)
+
+func GenerateVolumeFilters(filters map[string][]string) ([]libpod.VolumeFilter, error) {
+ var vf []libpod.VolumeFilter
+ for filter, v := range filters {
+ for _, val := range v {
+ switch filter {
+ case "name":
+ nameVal := val
+ vf = append(vf, func(v *libpod.Volume) bool {
+ return nameVal == v.Name()
+ })
+ case "driver":
+ driverVal := val
+ vf = append(vf, func(v *libpod.Volume) bool {
+ return v.Driver() == driverVal
+ })
+ case "scope":
+ scopeVal := val
+ vf = append(vf, func(v *libpod.Volume) bool {
+ return v.Scope() == scopeVal
+ })
+ case "label":
+ filterArray := strings.SplitN(val, "=", 2)
+ filterKey := filterArray[0]
+ var filterVal string
+ if len(filterArray) > 1 {
+ filterVal = filterArray[1]
+ } else {
+ filterVal = ""
+ }
+ vf = append(vf, func(v *libpod.Volume) bool {
+ for labelKey, labelValue := range v.Labels() {
+ if labelKey == filterKey && ("" == filterVal || labelValue == filterVal) {
+ return true
+ }
+ }
+ return false
+ })
+ case "opt":
+ filterArray := strings.SplitN(val, "=", 2)
+ filterKey := filterArray[0]
+ var filterVal string
+ if len(filterArray) > 1 {
+ filterVal = filterArray[1]
+ } else {
+ filterVal = ""
+ }
+ vf = append(vf, func(v *libpod.Volume) bool {
+ for labelKey, labelValue := range v.Options() {
+ if labelKey == filterKey && ("" == filterVal || labelValue == filterVal) {
+ return true
+ }
+ }
+ return false
+ })
+ default:
+ return nil, errors.Errorf("%q is an invalid volume filter", filter)
+ }
+ }
+ }
+ return vf, nil
+}
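
For context, a sketch of how repeated --filter key=value arguments might be turned into these predicates, mirroring how VolumeList in infra/abi/volumes.go consumes them; the filter values are assumed examples.

package example

import (
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/domain/filters"
)

// volumeFiltersFromCLI turns filter key/value pairs into libpod.VolumeFilter
// predicates that can be passed to Runtime.Volumes.
func volumeFiltersFromCLI() ([]libpod.VolumeFilter, error) {
	raw := map[string][]string{
		"label":  {"app=demo"},
		"driver": {"local"},
	}
	return filters.GenerateVolumeFilters(raw)
}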
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
new file mode 100644
index 000000000..3965c5f75
--- /dev/null
+++ b/pkg/domain/infra/abi/containers.go
@@ -0,0 +1,279 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+ "io/ioutil"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// TODO: Should return *entities.ContainerExistsReport, error
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupContainer(nameOrId)
+ if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
+ return nil, err
+ }
+ return &entities.BoolReport{Value: err == nil}, nil
+}
+
+func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
+ var (
+ responses []entities.WaitReport
+ )
+ ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ response := entities.WaitReport{Id: c.ID()}
+ exitCode, err := c.WaitForConditionWithInterval(options.Interval, options.Condition)
+ if err != nil {
+ response.Error = err
+ } else {
+ response.ExitCode = exitCode
+ }
+ responses = append(responses, response)
+ }
+ return responses, nil
+}
+
+func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ ctrs []*libpod.Container
+ err error
+ report []*entities.PauseUnpauseReport
+ )
+ if options.All {
+ ctrs, err = ic.Libpod.GetAllContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := c.Pause()
+ report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
+ }
+ return report, nil
+}
+
+func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ ctrs []*libpod.Container
+ err error
+ report []*entities.PauseUnpauseReport
+ )
+ if options.All {
+ ctrs, err = ic.Libpod.GetAllContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := c.Unpause()
+ report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
+ }
+ return report, nil
+}
+func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
+ var (
+ reports []*entities.StopReport
+ )
+ names := namesOrIds
+ for _, cidFile := range options.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ report := entities.StopReport{Id: con.ID()}
+ err = con.StopWithTimeout(options.Timeout)
+ if err != nil {
+ // These first two are considered non-fatal under the right conditions
+ if errors.Cause(err) == define.ErrCtrStopped {
+ logrus.Debugf("Container %s is already stopped", con.ID())
+ reports = append(reports, &report)
+ continue
+
+ } else if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ logrus.Debugf("Container %s is not running, could not stop", con.ID())
+ reports = append(reports, &report)
+ continue
+ }
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
+ var (
+ reports []*entities.KillReport
+ )
+ sig, err := signal.ParseSignalNameOrNumber(options.Signal)
+ if err != nil {
+ return nil, err
+ }
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ reports = append(reports, &entities.KillReport{
+ Id: con.ID(),
+ Err: con.Kill(uint(sig)),
+ })
+ }
+ return reports, nil
+}
+func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
+ var (
+ reports []*entities.RestartReport
+ )
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ timeout := con.StopTimeout()
+ if options.Timeout != nil {
+ timeout = *options.Timeout
+ }
+ reports = append(reports, &entities.RestartReport{
+ Id: con.ID(),
+ Err: con.RestartWithTimeout(ctx, timeout),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
+ var (
+ reports []*entities.RmReport
+ )
+ if options.Storage {
+ for _, ctr := range namesOrIds {
+ report := entities.RmReport{Id: ctr}
+ if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
+ report.Err = err
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+ }
+
+ names := namesOrIds
+ for _, cidFile := range options.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+
+ ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ // Failed to get containers. If force is specified, get the containers ID
+ // and evict them
+ if !options.Force {
+ return nil, err
+ }
+
+ for _, ctr := range namesOrIds {
+ logrus.Debugf("Evicting container %q", ctr)
+ report := entities.RmReport{Id: ctr}
+ id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
+ if err != nil {
+ if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+ reports = append(reports, &report)
+ continue
+ }
+ report.Err = errors.Wrapf(err, "Failed to evict container: %q", id)
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+ }
+
+ for _, c := range ctrs {
+ report := entities.RmReport{Id: c.ID()}
+ err := ic.Libpod.RemoveContainer(ctx, c, options.Force, options.Volumes)
+ if err != nil {
+ if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+ reports = append(reports, &report)
+ continue
+ }
+ logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.ContainerInspectOptions) ([]*entities.ContainerInspectReport, error) {
+ var reports []*entities.ContainerInspectReport
+ ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ data, err := c.Inspect(options.Size)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
+ var (
+ container *libpod.Container
+ err error
+ )
+
+ // Look up the container.
+ if options.Latest {
+ container, err = ic.Libpod.GetLatestContainer()
+ } else {
+ container, err = ic.Libpod.LookupContainer(options.NameOrID)
+ }
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to lookup requested container")
+ }
+
+ // Run Top.
+ report := &entities.StringSliceReport{}
+ report.Value, err = container.Top(options.Descriptors)
+ return report, err
+}
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
new file mode 100644
index 000000000..44420c1e1
--- /dev/null
+++ b/pkg/domain/infra/abi/images.go
@@ -0,0 +1,166 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+ "fmt"
+
+ libpodImage "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+)
+
+func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
+ if _, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId); err != nil {
+ return &entities.BoolReport{}, nil
+ }
+ return &entities.BoolReport{Value: true}, nil
+}
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId []string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+ report := entities.ImageDeleteReport{}
+
+ if opts.All {
+ var previousTargets []*libpodImage.Image
+ repeatRun:
+ targets, err := ir.Libpod.ImageRuntime().GetRWImages()
+ if err != nil {
+ return &report, errors.Wrapf(err, "unable to query local images")
+ }
+
+ if len(targets) > 0 && len(targets) == len(previousTargets) {
+ return &report, errors.New("unable to delete all images; re-run the rmi command")
+ }
+ previousTargets = targets
+
+ for _, img := range targets {
+ isParent, err := img.IsParent(ctx)
+ if err != nil {
+ return &report, err
+ }
+ if isParent {
+ continue
+ }
+ err = ir.deleteImage(ctx, img, opts, &report)
+ report.Errors = append(report.Errors, err)
+ }
+ if len(previousTargets) != 1 {
+ goto repeatRun
+ }
+ return &report, nil
+ }
+
+ for _, id := range nameOrId {
+ image, err := ir.Libpod.ImageRuntime().NewFromLocal(id)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ir.deleteImage(ctx, image, opts, &report)
+ if err != nil {
+ return &report, err
+ }
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) deleteImage(ctx context.Context, img *libpodImage.Image, opts entities.ImageDeleteOptions, report *entities.ImageDeleteReport) error {
+ results, err := ir.Libpod.RemoveImage(ctx, img, opts.Force)
+ switch errors.Cause(err) {
+ case nil:
+ break
+ case storage.ErrImageUsedByContainer:
+ report.ImageInUse = errors.New(
+ fmt.Sprintf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID()))
+ return nil
+ case libpodImage.ErrNoSuchImage:
+ report.ImageNotFound = err
+ return nil
+ default:
+ return err
+ }
+
+ report.Deleted = append(report.Deleted, results.Deleted)
+ report.Untagged = append(report.Untagged, results.Untagged...)
+ return nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+ results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, opts.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ report := entities.ImagePruneReport{
+ Report: entities.Report{
+ Id: results,
+ Err: nil,
+ },
+ Size: 0,
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ results, err := image.History(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ history := entities.ImageHistoryReport{
+ Layers: make([]entities.ImageHistoryLayer, len(results)),
+ }
+
+ for i, layer := range results {
+ history.Layers[i] = ToDomainHistoryLayer(layer)
+ }
+ return &history, nil
+}
+
+func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer {
+ l := entities.ImageHistoryLayer{}
+ l.ID = layer.ID
+ l.Created = layer.Created.Unix()
+ l.CreatedBy = layer.CreatedBy
+ l.Tags = append(l.Tags, layer.Tags...)
+ l.Size = layer.Size
+ l.Comment = layer.Comment
+ return l
+}
+
+// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
+// if err != nil {
+// return nil, err
+// }
+//
+// results, err := r.libpod.RemoveImage(ctx, image, opts.Force)
+// if err != nil {
+// return nil, err
+// }
+//
+// report := entities.ImageDeleteReport{}
+// if err := utils.DeepCopy(&report, results); err != nil {
+// return nil, err
+// }
+// return &report, nil
+// }
+//
+// func (r *imageRuntime) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+// // TODO: map FilterOptions
+// id, err := r.libpod.ImageEngine().PruneImages(ctx, opts.All, []string{})
+// if err != nil {
+// return nil, err
+// }
+//
+// // TODO: Determine Size
+// report := entities.ImagePruneReport{}
+// copy(report.Report.Id, id)
+// return &report, nil
+// }
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
new file mode 100644
index 000000000..2f4020374
--- /dev/null
+++ b/pkg/domain/infra/abi/images_list.go
@@ -0,0 +1,80 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ libpodImage "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
+ var (
+ images []*libpodImage.Image
+ err error
+ )
+
+ // TODO: Future work: add support for domain.Filters
+ // filters := utils.ToLibpodFilters(opts.Filters)
+
+ if len(opts.Filter) > 0 {
+ images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
+ } else {
+ images, err = ir.Libpod.ImageRuntime().GetImages()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ summaries := make([]*entities.ImageSummary, len(images))
+ for i, img := range images {
+ var repoTags []string
+ if opts.All {
+ pairs, err := libpodImage.ReposToMap(img.Names())
+ if err != nil {
+ return nil, err
+ }
+
+ for repo, tags := range pairs {
+ for _, tag := range tags {
+ repoTags = append(repoTags, repo+":"+tag)
+ }
+ }
+ } else {
+ repoTags, _ = img.RepoTags()
+ }
+
+ digests := make([]string, len(img.Digests()))
+ for j, d := range img.Digests() {
+ digests[j] = string(d)
+ }
+
+ e := entities.ImageSummary{
+ ID: img.ID(),
+
+ ConfigDigest: string(img.ConfigDigest),
+ Created: img.Created().Unix(),
+ Dangling: img.Dangling(),
+ Digest: string(img.Digest()),
+ Digests: digests,
+ History: img.NamesHistory(),
+ Names: img.Names(),
+ ParentId: img.Parent,
+ ReadOnly: img.IsReadOnly(),
+ SharedSize: 0,
+ VirtualSize: img.VirtualSize,
+ RepoTags: repoTags,
+ }
+ e.Labels, _ = img.Labels(context.TODO())
+
+ ctnrs, _ := img.Containers()
+ e.Containers = len(ctnrs)
+
+ sz, _ := img.Size(context.TODO())
+ e.Size = int64(*sz)
+
+ summaries[i] = &e
+ }
+ return summaries, nil
+}
diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go
new file mode 100644
index 000000000..20ef1b150
--- /dev/null
+++ b/pkg/domain/infra/abi/images_test.go
@@ -0,0 +1,37 @@
+package abi
+
+//
+// import (
+// "context"
+// "testing"
+//
+// "github.com/stretchr/testify/mock"
+// )
+//
+// type MockImageRuntime struct {
+// mock.Mock
+// }
+//
+// func (m *MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error {
+// _ = m.Called(ctx, renderer, name)
+// return nil
+// }
+//
+// func TestImageSuccess(t *testing.T) {
+// actual := func() interface{} { return nil }
+//
+// m := new(MockImageRuntime)
+// m.On(
+// "Delete",
+// mock.AnythingOfType("*context.emptyCtx"),
+// mock.AnythingOfType("func() interface {}"),
+// "fedora").
+// Return(nil)
+//
+// r := DirectImageRuntime{m}
+// err := r.Delete(context.TODO(), actual, "fedora")
+// if err != nil {
+// t.Errorf("error should be nil, got: %v", err)
+// }
+// m.AssertExpectations(t)
+// }
diff --git a/pkg/domain/infra/abi/parse/parse.go b/pkg/domain/infra/abi/parse/parse.go
new file mode 100644
index 000000000..6c0e1ee55
--- /dev/null
+++ b/pkg/domain/infra/abi/parse/parse.go
@@ -0,0 +1,68 @@
+package parse
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Handle volume options from CLI.
+// Parse "o" option to find UID, GID.
+func ParseVolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error) {
+ libpodOptions := []libpod.VolumeCreateOption{}
+ volumeOptions := make(map[string]string)
+
+ for key, value := range opts {
+ switch key {
+ case "o":
+ // o has special handling to parse out UID, GID.
+ // These are separate Libpod options.
+ splitVal := strings.Split(value, ",")
+ finalVal := []string{}
+ for _, o := range splitVal {
+ // Options will be formatted as either "opt" or
+ // "opt=value"
+ splitO := strings.SplitN(o, "=", 2)
+ switch strings.ToLower(splitO[0]) {
+ case "uid":
+ if len(splitO) != 2 {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "uid option must provide a UID")
+ }
+ intUID, err := strconv.Atoi(splitO[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot convert UID %s to integer", splitO[1])
+ }
+ logrus.Debugf("Removing uid= from options and adding WithVolumeUID for UID %d", intUID)
+ libpodOptions = append(libpodOptions, libpod.WithVolumeUID(intUID))
+ case "gid":
+ if len(splitO) != 2 {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "gid option must provide a GID")
+ }
+ intGID, err := strconv.Atoi(splitO[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot convert GID %s to integer", splitO[1])
+ }
+ logrus.Debugf("Removing gid= from options and adding WithVolumeGID for GID %d", intGID)
+ libpodOptions = append(libpodOptions, libpod.WithVolumeGID(intGID))
+ default:
+ finalVal = append(finalVal, o)
+ }
+ }
+ if len(finalVal) > 0 {
+ volumeOptions[key] = strings.Join(finalVal, ",")
+ }
+ default:
+ volumeOptions[key] = value
+ }
+ }
+
+ if len(volumeOptions) > 0 {
+ libpodOptions = append(libpodOptions, libpod.WithVolumeOptions(volumeOptions))
+ }
+
+ return libpodOptions, nil
+}
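
A short sketch of the option splitting this parser performs; the size=10M value is an assumed example.

package example

import (
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/domain/infra/abi/parse"
)

// demoParse shows the split: uid= and gid= in "o" become dedicated libpod
// options (WithVolumeUID/WithVolumeGID), while the remainder ("size=10M"
// here) is passed through as a generic volume option.
func demoParse() ([]libpod.VolumeCreateOption, error) {
	opts := map[string]string{
		"type": "tmpfs",
		"o":    "uid=1000,gid=1000,size=10M",
	}
	return parse.ParseVolumeOptions(opts)
}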
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
new file mode 100644
index 000000000..619e973cf
--- /dev/null
+++ b/pkg/domain/infra/abi/pods.go
@@ -0,0 +1,252 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// getPodsByContext returns a slice of pods. Note that all, latest and pods are
+// mutually exclusive arguments.
+func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
+ var outpods []*libpod.Pod
+ if all {
+ return runtime.GetAllPods()
+ }
+ if latest {
+ p, err := runtime.GetLatestPod()
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, p)
+ return outpods, nil
+ }
+ var err error
+ for _, p := range pods {
+ pod, e := runtime.LookupPod(p)
+ if e != nil {
+ // Log all errors here, so callers don't need to.
+ logrus.Debugf("Error looking up pod %q: %v", p, e)
+ if err == nil {
+ err = e
+ }
+ } else {
+ outpods = append(outpods, pod)
+ }
+ }
+ return outpods, err
+}
+
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ _, err := ic.Libpod.LookupPod(nameOrId)
+ if err != nil && errors.Cause(err) != define.ErrNoSuchPod {
+ return nil, err
+ }
+ return &entities.BoolReport{Value: err == nil}, nil
+}
+
+func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
+ var (
+ reports []*entities.PodKillReport
+ )
+ sig, err := signal.ParseSignalNameOrNumber(options.Signal)
+ if err != nil {
+ return nil, err
+ }
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, p := range pods {
+ report := entities.PodKillReport{Id: p.ID()}
+ conErrs, err := p.Kill(uint(sig))
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(conErrs) > 0 {
+ for _, err := range conErrs {
+ report.Errs = append(report.Errs, err)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
+ var (
+ reports []*entities.PodPauseReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodPauseReport{Id: p.ID()}
+ errs, err := p.Pause()
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
+ var (
+ reports []*entities.PodUnpauseReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodUnpauseReport{Id: p.ID()}
+ errs, err := p.Unpause()
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
+ var (
+ reports []*entities.PodStopReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodStopReport{Id: p.ID()}
+ errs, err := p.StopWithTimeout(ctx, false, options.Timeout)
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
+ var (
+ reports []*entities.PodRestartReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodRestartReport{Id: p.ID()}
+ errs, err := p.Restart(ctx)
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
+ var (
+ reports []*entities.PodStartReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodStartReport{Id: p.ID()}
+ errs, err := p.Start(ctx)
+ if err != nil {
+ report.Errs = []error{err}
+ reports = append(reports, &report)
+ continue
+ }
+ if len(errs) > 0 {
+ for _, v := range errs {
+ report.Errs = append(report.Errs, v)
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
+ var (
+ reports []*entities.PodRmReport
+ )
+ pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range pods {
+ report := entities.PodRmReport{Id: p.ID()}
+ err := ic.Libpod.RemovePod(ctx, p, true, options.Force)
+ if err != nil {
+ report.Err = err
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodCreate(ctx context.Context, opts entities.PodCreateOptions) (*entities.PodCreateReport, error) {
+ podSpec := specgen.NewPodSpecGenerator()
+ opts.ToPodSpecGen(podSpec)
+ pod, err := podSpec.MakePod(ic.Libpod)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.PodCreateReport{Id: pod.ID()}, nil
+}
diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go
new file mode 100644
index 000000000..b53fb6d3a
--- /dev/null
+++ b/pkg/domain/infra/abi/runtime.go
@@ -0,0 +1,17 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "github.com/containers/libpod/libpod"
+)
+
+// Image-related runtime linked against libpod library
+type ImageEngine struct {
+ Libpod *libpod.Runtime
+}
+
+// Container-related runtime linked against libpod library
+type ContainerEngine struct {
+ Libpod *libpod.Runtime
+}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
new file mode 100644
index 000000000..bdae4359d
--- /dev/null
+++ b/pkg/domain/infra/abi/volumes.go
@@ -0,0 +1,159 @@
+// +build ABISupport
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/filters"
+ "github.com/containers/libpod/pkg/domain/infra/abi/parse"
+ "github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+ var (
+ volumeOptions []libpod.VolumeCreateOption
+ )
+ if len(opts.Name) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeName(opts.Name))
+ }
+ if len(opts.Driver) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeDriver(opts.Driver))
+ }
+ if len(opts.Label) > 0 {
+ volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(opts.Label))
+ }
+ if len(opts.Options) > 0 {
+ parsedOptions, err := parse.ParseVolumeOptions(opts.Options)
+ if err != nil {
+ return nil, err
+ }
+ volumeOptions = append(volumeOptions, parsedOptions...)
+ }
+ vol, err := ic.Libpod.NewVolume(ctx, volumeOptions...)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.IdOrNameResponse{IdOrName: vol.Name()}, nil
+}
+
+func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
+ var (
+ err error
+ reports []*entities.VolumeRmReport
+ vols []*libpod.Volume
+ )
+ if opts.All {
+ vols, err = ic.Libpod.Volumes()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ for _, id := range namesOrIds {
+ vol, err := ic.Libpod.LookupVolume(id)
+ if err != nil {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: err,
+ Id: id,
+ })
+ continue
+ }
+ vols = append(vols, vol)
+ }
+ }
+ for _, vol := range vols {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: ic.Libpod.RemoveVolume(ctx, vol, opts.Force),
+ Id: vol.Name(),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
+ var (
+ err error
+ reports []*entities.VolumeInspectReport
+ vols []*libpod.Volume
+ )
+
+ // Note: as with previous implementation, a single failure here
+ // results in a return.
+ if opts.All {
+ vols, err = ic.Libpod.GetAllVolumes()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ for _, v := range namesOrIds {
+ vol, err := ic.Libpod.LookupVolume(v)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error inspecting volume %s", v)
+ }
+ vols = append(vols, vol)
+ }
+ }
+ for _, v := range vols {
+ config := entities.VolumeConfigResponse{
+ Name: v.Name(),
+ Driver: v.Driver(),
+ Mountpoint: v.MountPoint(),
+ CreatedAt: v.CreatedTime(),
+ Labels: v.Labels(),
+ Scope: v.Scope(),
+ Options: v.Options(),
+ UID: v.UID(),
+ GID: v.GID(),
+ }
+ reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) {
+ var (
+ reports []*entities.VolumePruneReport
+ )
+ pruned, err := ic.Libpod.PruneVolumes(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range pruned {
+ reports = append(reports, &entities.VolumePruneReport{
+ Err: v,
+ Id: k,
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) {
+ var (
+ reports []*entities.VolumeListReport
+ )
+ volumeFilters, err := filters.GenerateVolumeFilters(opts.Filter)
+ if err != nil {
+ return nil, err
+ }
+ vols, err := ic.Libpod.Volumes(volumeFilters...)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ config := entities.VolumeConfigResponse{
+ Name: v.Name(),
+ Driver: v.Driver(),
+ Mountpoint: v.MountPoint(),
+ CreatedAt: v.CreatedTime(),
+ Labels: v.Labels(),
+ Scope: v.Scope(),
+ Options: v.Options(),
+ UID: v.UID(),
+ GID: v.GID(),
+ }
+ reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
+ }
+ return reports, nil
+}
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
new file mode 100644
index 000000000..f11026571
--- /dev/null
+++ b/pkg/domain/infra/runtime_abi.go
@@ -0,0 +1,38 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+// NewContainerEngine factory provides a libpod runtime for container-related operations
+func NewContainerEngine(facts entities.EngineOptions) (entities.ContainerEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ r, err := NewLibpodRuntime(facts.FlagSet, facts)
+ return r, err
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(facts entities.EngineOptions) (entities.ImageEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ r, err := NewLibpodImageRuntime(facts.FlagSet, facts)
+ return r, err
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
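
A hedged sketch of using both factories together; this is only meaningful when built with the ABISupport tag, and the helper name is hypothetical.

package example

import (
	"github.com/containers/libpod/pkg/domain/entities"
	"github.com/containers/libpod/pkg/domain/infra"
)

// newEngines builds both engines from one set of EngineOptions. With ABIMode
// the flag set feeds the libpod runtime; with TunnelMode the Uri and
// Identities fields select the remote connection.
func newEngines(opts entities.EngineOptions) (entities.ImageEngine, entities.ContainerEngine, error) {
	imageEngine, err := infra.NewImageEngine(opts)
	if err != nil {
		return nil, nil, err
	}
	containerEngine, err := infra.NewContainerEngine(opts)
	if err != nil {
		return nil, nil, err
	}
	return imageEngine, containerEngine, nil
}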
diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go
new file mode 100644
index 000000000..befc66b9a
--- /dev/null
+++ b/pkg/domain/infra/runtime_image_proxy.go
@@ -0,0 +1,21 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
+ "github.com/spf13/pflag"
+)
+
+// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
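+// NewLibpodImageRuntime builds an ABI ImageEngine backed by a local libpod runtime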
+func NewLibpodImageRuntime(flags *pflag.FlagSet, opts entities.EngineOptions) (entities.ImageEngine, error) {
+ r, err := GetRuntime(context.Background(), flags, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &abi.ImageEngine{Libpod: r}, nil
+}
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
new file mode 100644
index 000000000..d59759707
--- /dev/null
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -0,0 +1,328 @@
+package infra
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/pkg/errors"
+ flag "github.com/spf13/pflag"
+)
+
+type engineOpts struct {
+ name string
+ renumber bool
+ migrate bool
+ noStore bool
+ withFDS bool
+ flags entities.EngineOptions
+}
+
+// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers
+func GetRuntimeMigrate(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions, newRuntime string) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ name: newRuntime,
+ renumber: false,
+ migrate: true,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify
+func GetRuntimeDisableFDs(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: false,
+ withFDS: false,
+ flags: ef,
+ })
+}
+
+// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
+func GetRuntimeRenumber(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: true,
+ migrate: false,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntime generates a new libpod runtime configured by command line options
+func GetRuntime(ctx context.Context, flags *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, flags, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: false,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+// GetRuntimeNoStore generates a new libpod runtime configured by command line options
+func GetRuntimeNoStore(ctx context.Context, fs *flag.FlagSet, ef entities.EngineOptions) (*libpod.Runtime, error) {
+ return getRuntime(ctx, fs, &engineOpts{
+ renumber: false,
+ migrate: false,
+ noStore: true,
+ withFDS: true,
+ flags: ef,
+ })
+}
+
+func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpod.Runtime, error) {
+ options := []libpod.RuntimeOption{}
+ storageOpts := storage.StoreOptions{}
+ storageSet := false
+
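+ // If any of the user-namespace mapping flags were supplied, parse them into
+ // storage ID mappings so the store is created with the requested ownership.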
+ uidmapFlag := fs.Lookup("uidmap")
+ gidmapFlag := fs.Lookup("gidmap")
+ subuidname := fs.Lookup("subuidname")
+ subgidname := fs.Lookup("subgidname")
+ if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) &&
+ (uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) {
+ userns, _ := fs.GetString("userns")
+ uidmapVal, _ := fs.GetStringSlice("uidmap")
+ gidmapVal, _ := fs.GetStringSlice("gidmap")
+ subuidVal, _ := fs.GetString("subuidname")
+ subgidVal, _ := fs.GetString("subgidname")
+ mappings, err := ParseIDMapping(namespaces.UsernsMode(userns), uidmapVal, gidmapVal, subuidVal, subgidVal)
+ if err != nil {
+ return nil, err
+ }
+ storageOpts.UIDMap = mappings.UIDMap
+ storageOpts.GIDMap = mappings.GIDMap
+
+ storageSet = true
+ }
+
+ if fs.Changed("root") {
+ storageSet = true
+ storageOpts.GraphRoot = opts.flags.Root
+ }
+ if fs.Changed("runroot") {
+ storageSet = true
+ storageOpts.RunRoot = opts.flags.Runroot
+ }
+ if len(storageOpts.RunRoot) > 50 {
+ return nil, errors.New("the specified runroot is longer than 50 characters")
+ }
+ if fs.Changed("storage-driver") {
+ storageSet = true
+ storageOpts.GraphDriverName = opts.flags.StorageDriver
+ // Overriding the default storage driver means the GraphDriverOptions from storage.conf no longer apply, so clear them
+ storageOpts.GraphDriverOptions = []string{}
+ }
+ // This should always be checked after storage-driver is checked
+ if len(opts.flags.StorageOpts) > 0 {
+ storageSet = true
+ storageOpts.GraphDriverOptions = opts.flags.StorageOpts
+ }
+ if opts.migrate {
+ options = append(options, libpod.WithMigrate())
+ if opts.name != "" {
+ options = append(options, libpod.WithMigrateRuntime(opts.name))
+ }
+ }
+
+ if opts.renumber {
+ options = append(options, libpod.WithRenumber())
+ }
+
+ // Only set this if the user changes storage config on the command line
+ if storageSet {
+ options = append(options, libpod.WithStorageConfig(storageOpts))
+ }
+
+ if !storageSet && opts.noStore {
+ options = append(options, libpod.WithNoStore())
+ }
+ // TODO CLI flags for image config?
+ // TODO CLI flag for signature policy?
+
+ if len(opts.flags.Namespace) > 0 {
+ options = append(options, libpod.WithNamespace(opts.flags.Namespace))
+ }
+
+ if fs.Changed("runtime") {
+ options = append(options, libpod.WithOCIRuntime(opts.flags.Runtime))
+ }
+
+ if fs.Changed("conmon") {
+ options = append(options, libpod.WithConmonPath(opts.flags.ConmonPath))
+ }
+ if fs.Changed("tmpdir") {
+ options = append(options, libpod.WithTmpDir(opts.flags.TmpDir))
+ }
+ if fs.Changed("network-cmd-path") {
+ options = append(options, libpod.WithNetworkCmdPath(opts.flags.NetworkCmdPath))
+ }
+
+ if fs.Changed("events-backend") {
+ options = append(options, libpod.WithEventsLogger(opts.flags.EventsBackend))
+ }
+
+ if fs.Changed("cgroup-manager") {
+ options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager))
+ } else {
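+ // No cgroup manager was requested explicitly; rootless users on hosts
+ // without cgroups v2 (unified mode) fall back to cgroupfs.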
+ unified, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ if rootless.IsRootless() && !unified {
+ options = append(options, libpod.WithCgroupManager("cgroupfs"))
+ }
+ }
+
+ // TODO flag to set libpod static dir?
+ // TODO flag to set libpod tmp dir?
+
+ if fs.Changed("cni-config-dir") {
+ options = append(options, libpod.WithCNIConfigDir(opts.flags.CniConfigDir))
+ }
+ if fs.Changed("default-mounts-file") {
+ options = append(options, libpod.WithDefaultMountsFile(opts.flags.DefaultMountsFile))
+ }
+ if fs.Changed("hooks-dir") {
+ options = append(options, libpod.WithHooksDir(opts.flags.HooksDir...))
+ }
+
+ // TODO flag to set CNI plugins dir?
+
+ // TODO I don't think these belong here?
+ // Will follow up with a different PR to address
+ //
+ // Pod create options
+
+ infraImageFlag := fs.Lookup("infra-image")
+ if infraImageFlag != nil && infraImageFlag.Changed {
+ infraImage, _ := fs.GetString("infra-image")
+ options = append(options, libpod.WithDefaultInfraImage(infraImage))
+ }
+
+ infraCommandFlag := fs.Lookup("infra-command")
+ if infraCommandFlag != nil && infraCommandFlag.Changed {
+ infraCommand, _ := fs.GetString("infra-command")
+ options = append(options, libpod.WithDefaultInfraCommand(infraCommand))
+ }
+
+ if !opts.withFDS {
+ options = append(options, libpod.WithEnableSDNotify())
+ }
+ return libpod.NewRuntime(ctx, options...)
+}
+
+// ParseIDMapping takes uid/gid mappings and subuid/subgid map names and returns storage ID-mapping options
+func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
+ options := storage.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+
+ if mode.IsKeepID() {
+ if len(uidMapSlice) > 0 || len(gidMapSlice) > 0 {
+ return nil, errors.New("cannot specify custom mappings with --userns=keep-id")
+ }
+ if len(subUIDMap) > 0 || len(subGIDMap) > 0 {
+ return nil, errors.New("cannot specify subuidmap or subgidmap with --userns=keep-id")
+ }
+ if rootless.IsRootless() {
+ min := func(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+ }
+
+ uid := rootless.GetRootlessUID()
+ gid := rootless.GetRootlessGID()
+
+ uids, gids, err := rootless.GetConfiguredMappings()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot read mappings")
+ }
+ maxUID, maxGID := 0, 0
+ for _, u := range uids {
+ maxUID += u.Size
+ }
+ for _, g := range gids {
+ maxGID += g.Size
+ }
+
+ options.UIDMap, options.GIDMap = nil, nil
+
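+ // keep-id builds a three-segment mapping: container IDs below the caller's
+ // ID keep their value shifted up by one, the caller's ID maps to
+ // intermediate ID 0 (the caller's real ID on the host), and any remaining
+ // IDs in the configured range map to themselves.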
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
+ if maxUID > uid {
+ options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
+ }
+
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
+ if maxGID > gid {
+ options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
+ }
+
+ options.HostUIDMapping = false
+ options.HostGIDMapping = false
+ }
+ // Simply ignore the setting and do not set up an inner namespace for root, as it is a no-op
+ return &options, nil
+ }
+
+ if subGIDMap == "" && subUIDMap != "" {
+ subGIDMap = subUIDMap
+ }
+ if subUIDMap == "" && subGIDMap != "" {
+ subUIDMap = subGIDMap
+ }
+ if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
+ gidMapSlice = uidMapSlice
+ }
+ if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
+ uidMapSlice = gidMapSlice
+ }
+ if len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+ uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+ }
+ if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+ gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+ }
+
+ if subUIDMap != "" && subGIDMap != "" {
+ mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = mappings.UIDs()
+ options.GIDMap = mappings.GIDs()
+ }
+ parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
+ if err != nil {
+ return nil, err
+ }
+ parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+ options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+ if len(options.UIDMap) > 0 {
+ options.HostUIDMapping = false
+ }
+ if len(options.GIDMap) > 0 {
+ options.HostGIDMapping = false
+ }
+ return &options, nil
+}
diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go
new file mode 100644
index 000000000..2e38c74b9
--- /dev/null
+++ b/pkg/domain/infra/runtime_proxy.go
@@ -0,0 +1,21 @@
+// +build ABISupport
+
+package infra
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
+ flag "github.com/spf13/pflag"
+)
+
+// ContainerEngine Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
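+// NewLibpodRuntime builds an ABI ContainerEngine backed by a local libpod runtime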
+func NewLibpodRuntime(flags *flag.FlagSet, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+ r, err := GetRuntime(context.Background(), flags, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &abi.ContainerEngine{Libpod: r}, nil
+}
diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go
new file mode 100644
index 000000000..dc04b4e53
--- /dev/null
+++ b/pkg/domain/infra/runtime_tunnel.go
@@ -0,0 +1,35 @@
+// +build !ABISupport
+
+package infra
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
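+// NewContainerEngine factory provides a container engine for the remote (tunnel) mode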
+func NewContainerEngine(facts entities.EngineOptions) (entities.ContainerEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ return nil, fmt.Errorf("direct runtime not supported")
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
+
+// NewImageEngine factory provides an image engine for the remote (tunnel) mode
+func NewImageEngine(facts entities.EngineOptions) (entities.ImageEngine, error) {
+ switch facts.EngineMode {
+ case entities.ABIMode:
+ return nil, fmt.Errorf("direct image runtime not supported")
+ case entities.TunnelMode:
+ ctx, err := bindings.NewConnection(context.Background(), facts.Uri, facts.Identities...)
+ return &tunnel.ImageEngine{ClientCxt: ctx}, err
+ }
+ return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
+}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
new file mode 100644
index 000000000..3db38ea5c
--- /dev/null
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -0,0 +1,174 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ exists, err := containers.Exists(ic.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: exists}, err
+}
+
+func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
+ var (
+ responses []entities.WaitReport
+ )
+ cons, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range cons {
+ response := entities.WaitReport{Id: c.ID}
+ exitCode, err := containers.Wait(ic.ClientCxt, c.ID, &options.Condition)
+ if err != nil {
+ response.Error = err
+ } else {
+ response.ExitCode = exitCode
+ }
+ responses = append(responses, response)
+ }
+ return responses, nil
+}
+
+func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ reports []*entities.PauseUnpauseReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := containers.Pause(ic.ClientCxt, c.ID)
+ reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
+ var (
+ reports []*entities.PauseUnpauseReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ err := containers.Unpause(ic.ClientCxt, c.ID)
+ reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
+ var (
+ reports []*entities.StopReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ report := entities.StopReport{Id: c.ID}
+ report.Err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout)
+ // TODO we need to associate errors returned over HTTP with the common
+ // errors in the define package so that we can write equivalence tests;
+ // this will allow output to be the same as the native client
+ reports = append(reports, &report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
+ var (
+ reports []*entities.KillReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ reports = append(reports, &entities.KillReport{
+ Id: c.ID,
+ Err: containers.Kill(ic.ClientCxt, c.ID, options.Signal),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
+ var (
+ reports []*entities.RestartReport
+ timeout *int
+ )
+ if options.Timeout != nil {
+ t := int(*options.Timeout)
+ timeout = &t
+ }
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ctrs {
+ reports = append(reports, &entities.RestartReport{
+ Id: c.ID,
+ Err: containers.Restart(ic.ClientCxt, c.ID, timeout),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
+ var (
+ reports []*entities.RmReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ // TODO there is no endpoint for container eviction. Need to discuss
+ for _, c := range ctrs {
+ reports = append(reports, &entities.RmReport{
+ Id: c.ID,
+ Err: containers.Remove(ic.ClientCxt, c.ID, &options.Force, &options.Volumes),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.ContainerInspectOptions) ([]*entities.ContainerInspectReport, error) {
+ var (
+ reports []*entities.ContainerInspectReport
+ )
+ ctrs, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range ctrs {
+ data, err := containers.Inspect(ic.ClientCxt, con.ID, &options.Size)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
+ switch {
+ case options.Latest:
+ return nil, errors.New("latest is not supported")
+ case options.NameOrID == "":
+ return nil, errors.New("NameOrID must be specified")
+ }
+
+ topOutput, err := containers.Top(ic.ClientCxt, options.NameOrID, options.Descriptors)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.StringSliceReport{Value: topOutput}, nil
+}
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
new file mode 100644
index 000000000..f9183c955
--- /dev/null
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -0,0 +1,76 @@
+package tunnel
+
+import (
+ "context"
+ "strings"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/bindings/containers"
+ "github.com/containers/libpod/pkg/bindings/pods"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/pkg/errors"
+)
+
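+// getContainersByContext returns either every container known to the service
+// (all == true) or the containers matching the supplied names, IDs, or ID prefixes.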
+func getContainersByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]libpod.ListContainer, error) {
+ var (
+ cons []libpod.ListContainer
+ )
+ if all && len(namesOrIds) > 0 {
+ return nil, errors.New("cannot look up specific containers and all containers at the same time")
+ }
+ c, err := containers.List(contextWithConnection, nil, &bindings.PTrue, nil, nil, nil, &bindings.PTrue)
+ if err != nil {
+ return nil, err
+ }
+ if all {
+ return c, err
+ }
+ for _, id := range namesOrIds {
+ var found bool
+ for _, con := range c {
+ if id == con.ID || strings.HasPrefix(con.ID, id) || util.StringInSlice(id, con.Names) {
+ cons = append(cons, con)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, errors.Errorf("unable to find container %q", id)
+ }
+ }
+ return cons, nil
+}
+
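+// getPodsByContext returns either every pod known to the service (all == true)
+// or the pods matching the supplied names or ID prefixes.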
+func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIds []string) ([]*entities.ListPodsReport, error) {
+ var (
+ sPods []*entities.ListPodsReport
+ )
+ if all && len(namesOrIds) > 0 {
+ return nil, errors.New("cannot look up specific pods and all pods at the same time")
+ }
+
+ fPods, err := pods.List(contextWithConnection, nil)
+ if err != nil {
+ return nil, err
+ }
+ if all {
+ return fPods, nil
+ }
+ for _, nameOrId := range namesOrIds {
+ var found bool
+ for _, f := range fPods {
+ if f.Name == nameOrId || strings.HasPrefix(f.Id, nameOrId) {
+ sPods = append(sPods, f)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "unable to find pod %q", nameOrId)
+ }
+ }
+ return sPods, nil
+}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
new file mode 100644
index 000000000..6a3adc9ee
--- /dev/null
+++ b/pkg/domain/infra/tunnel/images.go
@@ -0,0 +1,87 @@
+package tunnel
+
+import (
+ "context"
+
+ images "github.com/containers/libpod/pkg/bindings/images"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) {
+ found, err := images.Exists(ir.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: found}, err
+}
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId []string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+ report := entities.ImageDeleteReport{}
+
+ for _, id := range nameOrId {
+ results, err := images.Remove(ir.ClientCxt, id, &opts.Force)
+ if err != nil {
+ return nil, err
+ }
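+ // Each entry in results is a map keyed by "Deleted" or "Untagged";
+ // fold them into the aggregate report.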
+ for _, e := range results {
+ if a, ok := e["Deleted"]; ok {
+ report.Deleted = append(report.Deleted, a)
+ }
+
+ if a, ok := e["Untagged"]; ok {
+ report.Untagged = append(report.Untagged, a)
+ }
+ }
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
+ imgs, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ is := make([]*entities.ImageSummary, len(imgs))
+ for i, img := range imgs {
+ hold := entities.ImageSummary{}
+ if err := utils.DeepCopy(&hold, img); err != nil {
+ return nil, err
+ }
+ is[i] = &hold
+ }
+ return is, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+ results, err := images.History(ir.ClientCxt, nameOrId)
+ if err != nil {
+ return nil, err
+ }
+
+ history := entities.ImageHistoryReport{
+ Layers: make([]entities.ImageHistoryLayer, len(results)),
+ }
+
+ for i, layer := range results {
+ hold := entities.ImageHistoryLayer{}
+ _ = utils.DeepCopy(&hold, layer)
+ history.Layers[i] = hold
+ }
+ return &history, nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+ results, err := images.Prune(ir.ClientCxt, &opts.All, opts.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ report := entities.ImagePruneReport{
+ Report: entities.Report{
+ Id: results,
+ Err: nil,
+ },
+ Size: 0,
+ }
+ return &report, nil
+}
diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go
new file mode 100644
index 000000000..4894874e5
--- /dev/null
+++ b/pkg/domain/infra/tunnel/pods.go
@@ -0,0 +1,179 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/pods"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/specgen"
+)
+
+func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
+ exists, err := pods.Exists(ic.ClientCxt, nameOrId)
+ return &entities.BoolReport{Value: exists}, err
+}
+
+func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
+ var (
+ reports []*entities.PodKillReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Kill(ic.ClientCxt, p.Id, &options.Signal)
+ if err != nil {
+ report := entities.PodKillReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
+ var (
+ reports []*entities.PodPauseReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Pause(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodPauseReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
+ var (
+ reports []*entities.PodUnpauseReport
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Unpause(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodUnpauseReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
+ var (
+ reports []*entities.PodStopReport
+ timeout int = -1
+ )
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ if options.Timeout != -1 {
+ timeout = options.Timeout
+ }
+ for _, p := range foundPods {
+ response, err := pods.Stop(ic.ClientCxt, p.Id, &timeout)
+ if err != nil {
+ report := entities.PodStopReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
+ var reports []*entities.PodRestartReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Restart(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodRestartReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
+ var reports []*entities.PodStartReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Start(ic.ClientCxt, p.Id)
+ if err != nil {
+ report := entities.PodStartReport{
+ Errs: []error{err},
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
+ var reports []*entities.PodRmReport
+ foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range foundPods {
+ response, err := pods.Remove(ic.ClientCxt, p.Id, &options.Force)
+ if err != nil {
+ report := entities.PodRmReport{
+ Err: err,
+ Id: p.Id,
+ }
+ reports = append(reports, &report)
+ continue
+ }
+ reports = append(reports, response)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) PodCreate(ctx context.Context, opts entities.PodCreateOptions) (*entities.PodCreateReport, error) {
+ podSpec := specgen.NewPodSpecGenerator()
+ opts.ToPodSpecGen(podSpec)
+ return pods.CreatePodFromSpec(ic.ClientCxt, podSpec)
+}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
new file mode 100644
index 000000000..c111f99e9
--- /dev/null
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -0,0 +1,15 @@
+package tunnel
+
+import (
+ "context"
+)
+
+// ImageEngine is an image-related runtime that uses an SSH tunnel to reach the Podman service
+type ImageEngine struct {
+ ClientCxt context.Context
+}
+
+// ContainerEngine is a container-related runtime that uses an SSH tunnel to reach the Podman service
+type ContainerEngine struct {
+ ClientCxt context.Context
+}
diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go
new file mode 100644
index 000000000..e48a7fa7c
--- /dev/null
+++ b/pkg/domain/infra/tunnel/volumes.go
@@ -0,0 +1,70 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/volumes"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IdOrNameResponse, error) {
+ response, err := volumes.Create(ic.ClientCxt, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.IdOrNameResponse{IdOrName: response.Name}, nil
+}
+
+func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
+ var (
+ reports []*entities.VolumeRmReport
+ )
+
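+ // With All set, expand namesOrIds to every volume known to the service.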
+ if opts.All {
+ vols, err := volumes.List(ic.ClientCxt, nil)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ namesOrIds = append(namesOrIds, v.Name)
+ }
+ }
+ for _, id := range namesOrIds {
+ reports = append(reports, &entities.VolumeRmReport{
+ Err: volumes.Remove(ic.ClientCxt, id, &opts.Force),
+ Id: id,
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
+ var (
+ reports []*entities.VolumeInspectReport
+ )
+ if opts.All {
+ vols, err := volumes.List(ic.ClientCxt, nil)
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ namesOrIds = append(namesOrIds, v.Name)
+ }
+ }
+ for _, id := range namesOrIds {
+ data, err := volumes.Inspect(ic.ClientCxt, id)
+ if err != nil {
+ return nil, err
+ }
+ reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: data})
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) {
+ return volumes.Prune(ic.ClientCxt)
+}
+
+func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) {
+ return volumes.List(ic.ClientCxt, opts.Filter)
+}
diff --git a/pkg/domain/utils/utils.go b/pkg/domain/utils/utils.go
new file mode 100644
index 000000000..c17769f62
--- /dev/null
+++ b/pkg/domain/utils/utils.go
@@ -0,0 +1,41 @@
+package utils
+
+import (
+ "net/url"
+ "strings"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+// DeepCopy does a deep copy of a structure
+// Error checking of parameters delegated to json engine
+var DeepCopy = func(dst interface{}, src interface{}) error {
+ payload, err := json.Marshal(src)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(payload, dst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
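+// ToLibpodFilters flattens url.Values into "key=value" strings, keeping only
+// the first value for each key.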
+func ToLibpodFilters(f url.Values) (filters []string) {
+ for k, v := range f {
+ filters = append(filters, k+"="+v[0])
+ }
+ return
+}
+
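+// ToUrlValues converts "key=value" filter strings into url.Values, splitting
+// on the first '=' only so values may themselves contain '='.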
+func ToUrlValues(f []string) (filters url.Values) {
+ filters = make(url.Values)
+ for _, v := range f {
+ t := strings.SplitN(v, "=", 2)
+ if len(t) < 2 {
+ // Skip malformed filters that have no value part.
+ continue
+ }
+ filters.Add(t[0], t[1])
+ }
+ return
+}