author    | Jhon Honce <jhonce@redhat.com> | 2020-03-12 10:49:03 -0700
committer | Jhon Honce <jhonce@redhat.com> | 2020-03-18 16:41:12 -0700
commit    | fbe743501e2a3ea28fe446754b9b12988b4e7a0e (patch)
tree      | 7582c99d828bcbc83b6bb312e253f006ac637734 /pkg/domain/infra
parent    | bd9386ddac4ef6730fbe6ce4104e80f56a48fe43 (diff)
V2 podman command
Signed-off-by: Jhon Honce <jhonce@redhat.com>
Diffstat (limited to 'pkg/domain/infra')
-rw-r--r-- | pkg/domain/infra/abi/images.go          | 131
-rw-r--r-- | pkg/domain/infra/abi/images_test.go     |  37
-rw-r--r-- | pkg/domain/infra/abi/runtime.go         |  19
-rw-r--r-- | pkg/domain/infra/runtime_abi.go         |  39
-rw-r--r-- | pkg/domain/infra/runtime_image_proxy.go |  25
-rw-r--r-- | pkg/domain/infra/runtime_libpod.go      | 331
-rw-r--r-- | pkg/domain/infra/runtime_proxy.go       |  54
-rw-r--r-- | pkg/domain/infra/runtime_tunnel.go      |  35
-rw-r--r-- | pkg/domain/infra/tunnel/images.go       |  81
-rw-r--r-- | pkg/domain/infra/tunnel/runtime.go      |  45
10 files changed, 797 insertions, 0 deletions
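
The patch below adds two factory entry points, `NewContainerEngine` and `NewImageEngine`, which select either the in-process libpod engine (`entities.ABIMode`, behind the `ABISupport` build tag) or a client that tunnels to a podman service (`entities.TunnelMode`). The following is only a minimal, hypothetical caller sketch based on those factories: the empty `entities.EngineOptions` and `entities.ImageListOptions` literals are assumptions for illustration (the real podman v2 CLI populates `FlagSet`, `Flags`, `Uri`, and `Identities` from its parsed flags), and it assumes the `entities.ImageEngine` interface exposes the `List` method implemented in this patch.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
	"github.com/containers/libpod/pkg/domain/infra"
)

func main() {
	// Zero-value options for illustration only; the CLI normally fills
	// EngineOptions (FlagSet, Flags, Uri, Identities) from cobra/pflag.
	opts := entities.EngineOptions{}

	// ABIMode builds the in-process libpod engine (requires the ABISupport
	// build tag); TunnelMode would instead dial a podman service through
	// pkg/bindings using opts.Uri and opts.Identities.
	ir, err := infra.NewImageEngine(entities.ABIMode, opts)
	if err != nil {
		panic(err)
	}

	// List local images through the new domain-layer interface.
	report, err := ir.List(context.Background(), entities.ImageListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d local images\n", len(report.Images))
}
```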
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
new file mode 100644
index 000000000..2db08f259
--- /dev/null
+++ b/pkg/domain/infra/abi/images.go
@@ -0,0 +1,131 @@
+// +build ABISupport
+
+package abi
+
+import (
+	"context"
+
+	libpodImage "github.com/containers/libpod/libpod/image"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+	image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+	if err != nil {
+		return nil, err
+	}
+
+	results, err := ir.Libpod.RemoveImage(ctx, image, opts.Force)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageDeleteReport{}
+	if err := utils.DeepCopy(&report, results); err != nil {
+		return nil, err
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+	results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, []string{})
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImagePruneReport{}
+	copy(report.Report.Id, results)
+	return &report, nil
+}
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) {
+	var (
+		images []*libpodImage.Image
+		err    error
+	)
+
+	filters := utils.ToLibpodFilters(opts.Filters)
+	if len(filters) > 0 {
+		images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(filters)
+	} else {
+		images, err = ir.Libpod.ImageRuntime().GetImages()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageListReport{
+		Images: make([]entities.ImageSummary, len(images)),
+	}
+	for i, img := range images {
+		hold := entities.ImageSummary{}
+		if err := utils.DeepCopy(&hold, img); err != nil {
+			return nil, err
+		}
+		report.Images[i] = hold
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+	image, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+	if err != nil {
+		return nil, err
+	}
+	results, err := image.History(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	history := entities.ImageHistoryReport{
+		Layers: make([]entities.ImageHistoryLayer, len(results)),
+	}
+
+	for i, layer := range results {
+		history.Layers[i] = ToDomainHistoryLayer(layer)
+	}
+	return &history, nil
+}
+
+func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer {
+	l := entities.ImageHistoryLayer{}
+	l.ID = layer.ID
+	l.Created = layer.Created.Unix()
+	l.CreatedBy = layer.CreatedBy
+	copy(l.Tags, layer.Tags)
+	l.Size = layer.Size
+	l.Comment = layer.Comment
+	return l
+}
+
+// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+// 	image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	results, err := r.libpod.RemoveImage(ctx, image, opts.Force)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	report := entities.ImageDeleteReport{}
+// 	if err := utils.DeepCopy(&report, results); err != nil {
+// 		return nil, err
+// 	}
+// 	return &report, nil
+// }
+//
+// func (r *imageRuntime) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+// 	// TODO: map FilterOptions
+// 	id, err := r.libpod.ImageEngine().PruneImages(ctx, opts.All, []string{})
+// 	if err != nil {
+// 		return nil, err
+// 	}
+//
+// 	// TODO: Determine Size
+// 	report := entities.ImagePruneReport{}
+// 	copy(report.Report.Id, id)
+// 	return &report, nil
+// }
diff --git a/pkg/domain/infra/abi/images_test.go b/pkg/domain/infra/abi/images_test.go
new file mode 100644
index 000000000..20ef1b150
--- /dev/null
+++ b/pkg/domain/infra/abi/images_test.go
@@ -0,0 +1,37 @@
+package abi
+
+//
+// import (
+// 	"context"
+// 	"testing"
+//
+// 	"github.com/stretchr/testify/mock"
+// )
+//
+// type MockImageRuntime struct {
+// 	mock.Mock
+// }
+//
+// func (m *MockImageRuntime) Delete(ctx context.Context, renderer func() interface{}, name string) error {
+// 	_ = m.Called(ctx, renderer, name)
+// 	return nil
+// }
+//
+// func TestImageSuccess(t *testing.T) {
+// 	actual := func() interface{} { return nil }
+//
+// 	m := new(MockImageRuntime)
+// 	m.On(
+// 		"Delete",
+// 		mock.AnythingOfType("*context.emptyCtx"),
+// 		mock.AnythingOfType("func() interface {}"),
+// 		"fedora").
+// 		Return(nil)
+//
+// 	r := DirectImageRuntime{m}
+// 	err := r.Delete(context.TODO(), actual, "fedora")
+// 	if err != nil {
+// 		t.Errorf("error should be nil, got: %v", err)
+// 	}
+// 	m.AssertExpectations(t)
+// }
diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go
new file mode 100644
index 000000000..479a69586
--- /dev/null
+++ b/pkg/domain/infra/abi/runtime.go
@@ -0,0 +1,19 @@
+// +build ABISupport
+
+package abi
+
+import (
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+// Image-related runtime linked against libpod library
+type ImageEngine struct {
+	Libpod *libpod.Runtime
+}
+
+// Container-related runtime linked against libpod library
+type ContainerEngine struct {
+	entities.ContainerEngine
+	Libpod *libpod.Runtime
+}
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
new file mode 100644
index 000000000..de996f567
--- /dev/null
+++ b/pkg/domain/infra/runtime_abi.go
@@ -0,0 +1,39 @@
+// +build ABISupport
+
+package infra
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/libpod/pkg/bindings"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/infra/abi"
+	"github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+// NewContainerEngine factory provides a libpod runtime for container-related operations
+func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		r, err := NewLibpodRuntime(opts.FlagSet, opts.Flags)
+		return &abi.ContainerEngine{ContainerEngine: r}, err
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
+
+// NewContainerEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		r, err := NewLibpodImageRuntime(opts.FlagSet, opts.Flags)
+		return r, err
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ImageEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go
new file mode 100644
index 000000000..480e7c8d7
--- /dev/null
+++ b/pkg/domain/infra/runtime_image_proxy.go
@@ -0,0 +1,25 @@
+// +build ABISupport
+
+package infra
+
+import (
+	"context"
+
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/infra/abi"
+	"github.com/spf13/pflag"
+)
+
+// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
+func NewLibpodImageRuntime(flags pflag.FlagSet, opts entities.EngineFlags) (entities.ImageEngine, error) {
+	r, err := GetRuntime(context.Background(), flags, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &abi.ImageEngine{Libpod: r}, nil
+}
+
+func (ir *runtime) ShutdownImageRuntime(force bool) error {
+	return ir.Libpod.Shutdown(force)
+}
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
new file mode 100644
index 000000000..b42c52b6e
--- /dev/null
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -0,0 +1,331 @@
+package infra
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/cgroups"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/namespaces"
+	"github.com/containers/libpod/pkg/rootless"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/pkg/errors"
+	flag "github.com/spf13/pflag"
+)
+
+type engineOpts struct {
+	name     string
+	renumber bool
+	migrate  bool
+	noStore  bool
+	withFDS  bool
+	flags    entities.EngineFlags
+}
+
+// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers
+func GetRuntimeMigrate(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags, newRuntime string) (*libpod.Runtime, error) {
+	return getRuntime(ctx, fs, &engineOpts{
+		name:     newRuntime,
+		renumber: false,
+		migrate:  true,
+		noStore:  false,
+		withFDS:  true,
+		flags:    ef,
+	})
+}
+
+// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify
+func GetRuntimeDisableFDs(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) {
+	return getRuntime(ctx, fs, &engineOpts{
+		renumber: false,
+		migrate:  false,
+		noStore:  false,
+		withFDS:  false,
+		flags:    ef,
+	})
+}
+
+// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
+func GetRuntimeRenumber(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) {
+	return getRuntime(ctx, fs, &engineOpts{
+		renumber: true,
+		migrate:  false,
+		noStore:  false,
+		withFDS:  true,
+		flags:    ef,
+	})
+}
+
+// GetRuntime generates a new libpod runtime configured by command line options
+func GetRuntime(ctx context.Context, flags flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) {
+	return getRuntime(ctx, flags, &engineOpts{
+		renumber: false,
+		migrate:  false,
+		noStore:  false,
+		withFDS:  true,
+		flags:    ef,
+	})
+}
+
+// GetRuntimeNoStore generates a new libpod runtime configured by command line options
+func GetRuntimeNoStore(ctx context.Context, fs flag.FlagSet, ef entities.EngineFlags) (*libpod.Runtime, error) {
+	return getRuntime(ctx, fs, &engineOpts{
+		renumber: false,
+		migrate:  false,
+		noStore:  true,
+		withFDS:  true,
+		flags:    ef,
+	})
+}
+
+func getRuntime(ctx context.Context, fs flag.FlagSet, opts *engineOpts) (*libpod.Runtime, error) {
+	options := []libpod.RuntimeOption{}
+	storageOpts := storage.StoreOptions{}
+	storageSet := false
+
+	uidmapFlag := fs.Lookup("uidmap")
+	gidmapFlag := fs.Lookup("gidmap")
+	subuidname := fs.Lookup("subuidname")
+	subgidname := fs.Lookup("subgidname")
+	if (uidmapFlag != nil && gidmapFlag != nil && subuidname != nil && subgidname != nil) &&
+		(uidmapFlag.Changed || gidmapFlag.Changed || subuidname.Changed || subgidname.Changed) {
+		userns, _ := fs.GetString("userns")
+		uidmapVal, _ := fs.GetStringSlice("uidmap")
+		gidmapVal, _ := fs.GetStringSlice("gidmap")
+		subuidVal, _ := fs.GetString("subuidname")
+		subgidVal, _ := fs.GetString("subgidname")
+		mappings, err := ParseIDMapping(namespaces.UsernsMode(userns), uidmapVal, gidmapVal, subuidVal, subgidVal)
+		if err != nil {
+			return nil, err
+		}
+		storageOpts.UIDMap = mappings.UIDMap
+		storageOpts.GIDMap = mappings.GIDMap
+
+		storageSet = true
+	}
+
+	if fs.Changed("root") {
+		storageSet = true
+		storageOpts.GraphRoot = opts.flags.Root
+	}
+	if fs.Changed("runroot") {
+		storageSet = true
+		storageOpts.RunRoot = opts.flags.Runroot
+	}
+	if len(storageOpts.RunRoot) > 50 {
+		return nil, errors.New("the specified runroot is longer than 50 characters")
+	}
+	if fs.Changed("storage-driver") {
+		storageSet = true
+		storageOpts.GraphDriverName = opts.flags.StorageDriver
+		// Overriding the default storage driver caused GraphDriverOptions from storage.conf to be ignored
+		storageOpts.GraphDriverOptions = []string{}
+	}
+	// This should always be checked after storage-driver is checked
+	if len(opts.flags.StorageOpts) > 0 {
+		storageSet = true
+		storageOpts.GraphDriverOptions = opts.flags.StorageOpts
+	}
+	if opts.migrate {
+		options = append(options, libpod.WithMigrate())
+		if opts.name != "" {
+			options = append(options, libpod.WithMigrateRuntime(opts.name))
+		}
+	}
+
+	if opts.renumber {
+		options = append(options, libpod.WithRenumber())
+	}
+
+	// Only set this if the user changes storage config on the command line
+	if storageSet {
+		options = append(options, libpod.WithStorageConfig(storageOpts))
+	}
+
+	if !storageSet && opts.noStore {
+		options = append(options, libpod.WithNoStore())
+	}
+	// TODO CLI flags for image config?
+	// TODO CLI flag for signature policy?
+
+	if len(opts.flags.Namespace) > 0 {
+		options = append(options, libpod.WithNamespace(opts.flags.Namespace))
+	}
+
+	if fs.Changed("runtime") {
+		options = append(options, libpod.WithOCIRuntime(opts.flags.Runtime))
+	}
+
+	if fs.Changed("conmon") {
+		options = append(options, libpod.WithConmonPath(opts.flags.ConmonPath))
+	}
+	if fs.Changed("tmpdir") {
+		options = append(options, libpod.WithTmpDir(opts.flags.TmpDir))
+	}
+	if fs.Changed("network-cmd-path") {
+		options = append(options, libpod.WithNetworkCmdPath(opts.flags.NetworkCmdPath))
+	}
+
+	if fs.Changed("events-backend") {
+		options = append(options, libpod.WithEventsLogger(opts.flags.EventsBackend))
+	}
+
+	if fs.Changed("cgroup-manager") {
+		options = append(options, libpod.WithCgroupManager(opts.flags.CGroupManager))
+	} else {
+		unified, err := cgroups.IsCgroup2UnifiedMode()
+		if err != nil {
+			return nil, err
+		}
+		if rootless.IsRootless() && !unified {
+			options = append(options, libpod.WithCgroupManager("cgroupfs"))
+		}
+	}
+
+	// TODO flag to set libpod static dir?
+	// TODO flag to set libpod tmp dir?
+
+	if fs.Changed("cni-config-dir") {
+		options = append(options, libpod.WithCNIConfigDir(opts.flags.CniConfigDir))
+	}
+	if fs.Changed("default-mounts-file") {
+		options = append(options, libpod.WithDefaultMountsFile(opts.flags.DefaultMountsFile))
+	}
+	if fs.Changed("hooks-dir") {
+		options = append(options, libpod.WithHooksDir(opts.flags.HooksDir...))
+	}
+
+	// TODO flag to set CNI plugins dir?
+
+	// TODO I don't think these belong here?
+	// Will follow up with a different PR to address
+	//
+	// Pod create options
+
+	infraImageFlag := fs.Lookup("infra-image")
+	if infraImageFlag != nil && infraImageFlag.Changed {
+		infraImage, _ := fs.GetString("infra-image")
+		options = append(options, libpod.WithDefaultInfraImage(infraImage))
+	}
+
+	infraCommandFlag := fs.Lookup("infra-command")
+	if infraCommandFlag != nil && infraImageFlag.Changed {
+		infraCommand, _ := fs.GetString("infra-command")
+		options = append(options, libpod.WithDefaultInfraCommand(infraCommand))
+	}
+
+	if !opts.withFDS {
+		options = append(options, libpod.WithEnableSDNotify())
+	}
+	if fs.Changed("config") {
+		return libpod.NewRuntimeFromConfig(ctx, opts.flags.Config, options...)
+	}
+	return libpod.NewRuntime(ctx, options...)
+}
+
+// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
+func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
+	options := storage.IDMappingOptions{
+		HostUIDMapping: true,
+		HostGIDMapping: true,
+	}
+
+	if mode.IsKeepID() {
+		if len(uidMapSlice) > 0 || len(gidMapSlice) > 0 {
+			return nil, errors.New("cannot specify custom mappings with --userns=keep-id")
+		}
+		if len(subUIDMap) > 0 || len(subGIDMap) > 0 {
+			return nil, errors.New("cannot specify subuidmap or subgidmap with --userns=keep-id")
+		}
+		if rootless.IsRootless() {
+			min := func(a, b int) int {
+				if a < b {
+					return a
+				}
+				return b
+			}
+
+			uid := rootless.GetRootlessUID()
+			gid := rootless.GetRootlessGID()
+
+			uids, gids, err := rootless.GetConfiguredMappings()
+			if err != nil {
+				return nil, errors.Wrapf(err, "cannot read mappings")
+			}
+			maxUID, maxGID := 0, 0
+			for _, u := range uids {
+				maxUID += u.Size
+			}
+			for _, g := range gids {
+				maxGID += g.Size
+			}
+
+			options.UIDMap, options.GIDMap = nil, nil
+
+			options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
+			options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
+			if maxUID > uid {
+				options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
+			}
+
+			options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
+			options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
+			if maxGID > gid {
+				options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
+			}
+
+			options.HostUIDMapping = false
+			options.HostGIDMapping = false
+		}
+		// Simply ignore the setting and do not setup an inner namespace for root as it is a no-op
+		return &options, nil
+	}
+
+	if subGIDMap == "" && subUIDMap != "" {
+		subGIDMap = subUIDMap
+	}
+	if subUIDMap == "" && subGIDMap != "" {
+		subUIDMap = subGIDMap
+	}
+	if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
+		gidMapSlice = uidMapSlice
+	}
+	if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
+		uidMapSlice = gidMapSlice
+	}
+	if len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+		uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+	}
+	if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+		gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+	}
+
+	if subUIDMap != "" && subGIDMap != "" {
+		mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = mappings.UIDs()
+		options.GIDMap = mappings.GIDs()
+	}
+	parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
+	if err != nil {
+		return nil, err
+	}
+	parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
+	if err != nil {
+		return nil, err
+	}
+	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+	if len(options.UIDMap) > 0 {
+		options.HostUIDMapping = false
+	}
+	if len(options.GIDMap) > 0 {
+		options.HostGIDMapping = false
+	}
+	return &options, nil
+}
diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go
new file mode 100644
index 000000000..d17b8efa1
--- /dev/null
+++ b/pkg/domain/infra/runtime_proxy.go
@@ -0,0 +1,54 @@
+// +build ABISupport
+
+package infra
+
+import (
+	"context"
+
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/domain/entities"
+	flag "github.com/spf13/pflag"
+)
+
+// ContainerEngine Proxy will be EOL'ed after podmanV2 is separated from libpod repo
+
+type runtime struct {
+	entities.ContainerEngine
+	Libpod *libpod.Runtime
+}
+
+func NewLibpodRuntime(flags flag.FlagSet, opts entities.EngineFlags) (entities.ContainerEngine, error) {
+	r, err := GetRuntime(context.Background(), flags, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &runtime{Libpod: r}, nil
+}
+
+func (r *runtime) ShutdownRuntime(force bool) error {
+	return r.Libpod.Shutdown(force)
+}
+
+func (r *runtime) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *runtime) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) {
+	panic("implement me")
+}
diff --git a/pkg/domain/infra/runtime_tunnel.go b/pkg/domain/infra/runtime_tunnel.go
new file mode 100644
index 000000000..8a606deaf
--- /dev/null
+++ b/pkg/domain/infra/runtime_tunnel.go
@@ -0,0 +1,35 @@
+// +build !ABISupport
+
+package infra
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/libpod/pkg/bindings"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/infra/tunnel"
+)
+
+func NewContainerEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ContainerEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		return nil, fmt.Errorf("direct runtime not supported")
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ContainerEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
+
+// NewImageEngine factory provides a libpod runtime for image-related operations
+func NewImageEngine(mode entities.EngineMode, opts entities.EngineOptions) (entities.ImageEngine, error) {
+	switch mode {
+	case entities.ABIMode:
+		return nil, fmt.Errorf("direct image runtime not supported")
+	case entities.TunnelMode:
+		ctx, err := bindings.NewConnection(context.Background(), opts.Uri.String(), opts.Identities...)
+		return &tunnel.ImageEngine{ClientCxt: ctx}, err
+	}
+	return nil, fmt.Errorf("runtime mode '%v' is not supported", mode)
+}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
new file mode 100644
index 000000000..718685e57
--- /dev/null
+++ b/pkg/domain/infra/tunnel/images.go
@@ -0,0 +1,81 @@
+package tunnel
+
+import (
+	"context"
+	"net/url"
+
+	images "github.com/containers/libpod/pkg/bindings/images"
+	"github.com/containers/libpod/pkg/domain/entities"
+	"github.com/containers/libpod/pkg/domain/utils"
+)
+
+func (ir *ImageEngine) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
+	results, err := images.Remove(ir.ClientCxt, nameOrId, &opts.Force)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageDeleteReport{
+		Untagged: nil,
+		Deleted:  "",
+	}
+
+	for _, e := range results {
+		if a, ok := e["Deleted"]; ok {
+			report.Deleted = a
+		}
+
+		if a, ok := e["Untagged"]; ok {
+			report.Untagged = append(report.Untagged, a)
+		}
+	}
+	return &report, err
+}
+
+func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) (*entities.ImageListReport, error) {
+	images, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImageListReport{
+		Images: make([]entities.ImageSummary, len(images)),
+	}
+	for i, img := range images {
+		hold := entities.ImageSummary{}
+		if err := utils.DeepCopy(&hold, img); err != nil {
+			return nil, err
+		}
+		report.Images[i] = hold
+	}
+	return &report, nil
+}
+
+func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entities.ImageHistoryOptions) (*entities.ImageHistoryReport, error) {
+	results, err := images.History(ir.ClientCxt, nameOrId)
+	if err != nil {
+		return nil, err
+	}
+
+	history := entities.ImageHistoryReport{
+		Layers: make([]entities.ImageHistoryLayer, len(results)),
+	}
+
+	for i, layer := range results {
+		hold := entities.ImageHistoryLayer{}
+		_ = utils.DeepCopy(&hold, layer)
+		history.Layers[i] = hold
+	}
+	return &history, nil
+}
+
+func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
+	results, err := images.Prune(ir.ClientCxt, url.Values{})
+	if err != nil {
+		return nil, err
+	}
+
+	report := entities.ImagePruneReport{}
+	copy(report.Report.Id, results)
+	return &report, nil
+}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
new file mode 100644
index 000000000..af433a6d9
--- /dev/null
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -0,0 +1,45 @@
+package tunnel
+
+import (
+	"context"
+
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+// Image-related runtime using an ssh-tunnel to utilize Podman service
+type ImageEngine struct {
+	ClientCxt context.Context
+}
+
+// Container-related runtime using an ssh-tunnel to utilize Podman service
+type ContainerEngine struct {
+	ClientCxt context.Context
+}
+
+func (r *ContainerEngine) Shutdown(force bool) error {
+	return nil
+}
+
+func (r *ContainerEngine) ContainerDelete(ctx context.Context, opts entities.ContainerDeleteOptions) (*entities.ContainerDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) ContainerPrune(ctx context.Context) (*entities.ContainerPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) PodDelete(ctx context.Context, opts entities.PodPruneOptions) (*entities.PodDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) PodPrune(ctx context.Context) (*entities.PodPruneReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) VolumeDelete(ctx context.Context, opts entities.VolumeDeleteOptions) (*entities.VolumeDeleteReport, error) {
+	panic("implement me")
+}
+
+func (r *ContainerEngine) VolumePrune(ctx context.Context) (*entities.VolumePruneReport, error) {
+	panic("implement me")
+}
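
`ParseIDMapping` in runtime_libpod.go converts `--uidmap`/`--gidmap` style strings (and optional `/etc/subuid`/`/etc/subgid` usernames) into `containers/storage` `IDMappingOptions`. The sketch below calls it directly with made-up mapping values purely for illustration; the mapping string and the empty userns mode are assumptions, not values taken from this patch.

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/domain/infra"
	"github.com/containers/libpod/pkg/namespaces"
)

func main() {
	// "0:100000:65536" maps container IDs 0..65535 onto host IDs
	// 100000..165535, the usual containerID:hostID:size triplet.
	mappings, err := infra.ParseIDMapping(
		namespaces.UsernsMode(""),  // no special mode such as keep-id
		[]string{"0:100000:65536"}, // --uidmap values (hypothetical)
		[]string{"0:100000:65536"}, // --gidmap values (hypothetical)
		"", "",                     // no subuid/subgid usernames
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("UIDMap=%v GIDMap=%v\n", mappings.UIDMap, mappings.GIDMap)
}
```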