Diffstat (limited to 'pkg')
-rw-r--r--  pkg/adapter/containers.go         184
-rw-r--r--  pkg/adapter/containers_remote.go  176
-rw-r--r--  pkg/adapter/pods.go                53
-rw-r--r--  pkg/adapter/pods_remote.go         75
-rw-r--r--  pkg/adapter/runtime.go             28
-rw-r--r--  pkg/adapter/runtime_remote.go      12
-rw-r--r--  pkg/rootless/rootless_linux.c      55
-rw-r--r--  pkg/varlinkapi/containers.go       31
-rw-r--r--  pkg/varlinkapi/pods.go             22
9 files changed, 579 insertions, 57 deletions
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 931c55a57..5279f11b2 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -510,3 +510,187 @@ func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.Contai
}
return lastError
}
+
+// Start starts one or more containers
+func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) {
+ var (
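+		// 125 is the exit code used when the failure is in podman itself rather than in the container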
+ exitCode = 125
+ lastError error
+ )
+
+ args := c.InputArgs
+ if c.Latest {
+ lastCtr, err := r.GetLatestContainer()
+ if err != nil {
+ return 0, errors.Wrapf(err, "unable to get latest container")
+ }
+ args = append(args, lastCtr.ID())
+ }
+
+ for _, container := range args {
+ ctr, err := r.LookupContainer(container)
+ if err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to find container %s", container)
+ continue
+ }
+
+ ctrState, err := ctr.State()
+ if err != nil {
+ return exitCode, errors.Wrapf(err, "unable to get container state")
+ }
+
+ ctrRunning := ctrState == libpod.ContainerStateRunning
+
+ if c.Attach {
+ inputStream := os.Stdin
+ if !c.Interactive {
+ inputStream = nil
+ }
+
+			// attach to the container and also start it if not already running
+ // If the container is in a pod, also set to recursively start dependencies
+ err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, c.DetachKeys, sigProxy, !ctrRunning, ctr.PodID() != "")
+ if errors.Cause(err) == libpod.ErrDetach {
+ // User manually detached
+ // Exit cleanly immediately
+ exitCode = 0
+ return exitCode, nil
+ }
+
+ if ctrRunning {
+ return 0, err
+ }
+
+ if err != nil {
+ return exitCode, errors.Wrapf(err, "unable to start container %s", ctr.ID())
+ }
+
+ if ecode, err := ctr.Wait(); err != nil {
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ // The container may have been removed
+ // Go looking for an exit file
+ rtc, err := r.GetConfig()
+ if err != nil {
+ return 0, err
+ }
+ ctrExitCode, err := ReadExitFile(rtc.TmpDir, ctr.ID())
+ if err != nil {
+ logrus.Errorf("Cannot get exit code: %v", err)
+ exitCode = 127
+ } else {
+ exitCode = ctrExitCode
+ }
+ }
+ } else {
+ exitCode = int(ecode)
+ }
+
+ return exitCode, nil
+ }
+ if ctrRunning {
+ fmt.Println(ctr.ID())
+ continue
+ }
+ // Handle non-attach start
+ // If the container is in a pod, also set to recursively start dependencies
+ if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to start container %q", container)
+ continue
+ }
+ fmt.Println(container)
+ }
+ return exitCode, lastError
+}
+
+// PauseContainers pauses container(s) based on CLI inputs.
+func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*libpod.Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("pause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+	logrus.Debugf("Setting maximum pause workers to %d", maxWorkers)
+
+ if cli.All {
+ ctrs, err = r.GetRunningContainers()
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ pool := shared.NewPool("pause", maxWorkers, len(ctrs))
+ for _, c := range ctrs {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.Pause()
+ if err != nil {
+ logrus.Debugf("Failed to pause container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// UnpauseContainers unpauses container(s) based on CLI inputs.
+func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*libpod.Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("pause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+	logrus.Debugf("Setting maximum unpause workers to %d", maxWorkers)
+
+ if cli.All {
+ var filterFuncs []libpod.ContainerFilter
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == libpod.ContainerStatePaused
+ })
+ ctrs, err = r.GetContainers(filterFuncs...)
+ } else {
+ ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ pool := shared.NewPool("pause", maxWorkers, len(ctrs))
+ for _, c := range ctrs {
+ ctr := c
+ pool.Add(shared.Job{
+ ID: ctr.ID(),
+ Fn: func() error {
+ err := ctr.Unpause()
+ if err != nil {
+ logrus.Debugf("Failed to unpause container %s: %s", ctr.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
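Note: PauseContainers and UnpauseContainers above hand each container off to a shared worker pool and collect per-container results. The following is a minimal standalone sketch of that job-pool pattern using only the standard library; the job and runPool names are illustrative and are not podman's shared.Pool API.

package main

import (
	"fmt"
	"sync"
)

// job mirrors the shape of the work items queued above: an ID plus a function
// that performs the operation and reports an error.
type job struct {
	ID string
	Fn func() error
}

// runPool executes jobs on a bounded number of workers and splits the results
// into successful IDs and a map of per-ID failures.
func runPool(workers int, jobs []job) ([]string, map[string]error) {
	var (
		mu       sync.Mutex
		ok       []string
		failures = map[string]error{}
		wg       sync.WaitGroup
	)
	queue := make(chan job)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range queue {
				if err := j.Fn(); err != nil {
					mu.Lock()
					failures[j.ID] = err
					mu.Unlock()
					continue
				}
				mu.Lock()
				ok = append(ok, j.ID)
				mu.Unlock()
			}
		}()
	}
	for _, j := range jobs {
		queue <- j
	}
	close(queue)
	wg.Wait()
	return ok, failures
}

func main() {
	jobs := []job{
		{ID: "ctr1", Fn: func() error { return nil }},
		{ID: "ctr2", Fn: func() error { return fmt.Errorf("container is not running") }},
	}
	succeeded, failed := runPool(2, jobs)
	fmt.Println("ok:", succeeded)
	fmt.Println("failures:", failed)
}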
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 50cff9fa0..cb61871bf 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,18 @@ func (c *Container) ID() string {
return c.config.ID
}
+// Pause a container
+func (c *Container) Pause() error {
+ _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
+ return err
+}
+
+// Unpause a container
+func (c *Container) Unpause() error {
+ _, err := iopodman.UnpauseContainer().Call(c.Runtime.Conn, c.ID())
+ return err
+}
+
// Config returns a container config
func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
// TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
@@ -90,6 +102,19 @@ func (r *LocalRuntime) Spec(name string) (*specs.Spec, error) {
return &data, nil
}
+// LookupContainers is a wrapper for LookupContainer that accepts multiple IDs or names
+func (r *LocalRuntime) LookupContainers(idsOrNames []string) ([]*Container, error) {
+ var containers []*Container
+ for _, name := range idsOrNames {
+ ctr, err := r.LookupContainer(name)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, ctr)
+ }
+ return containers, nil
+}
+
// LookupContainer gets basic information about container over a varlink
// connection and then translates it to a *Container
func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
@@ -107,6 +132,24 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
}, nil
}
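+// LookupContainersWithStatus returns a slice of containers filtered by the given status strings over a varlink connection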
+func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
+ var containers []*Container
+ ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
+ if err != nil {
+ return nil, err
+ }
+	// This is not performance savvy; if this turns out to be a problematic series of lookups, we need to
+ // create a new endpoint to speed things up
+ for _, ctr := range ctrs {
+ container, err := r.LookupContainer(ctr.Id)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, container)
+ }
+ return containers, nil
+}
+
func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
reply, err := iopodman.GetContainersByContext().Call(r.Conn, false, true, nil)
if err != nil {
@@ -327,22 +370,12 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions)
// CreateContainer creates a container from the cli over varlink
func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) {
- if !c.Bool("detach") {
- // TODO need to add attach when that function becomes available
- return "", errors.New("the remote client only supports detached containers")
- }
results := shared.NewIntermediateLayer(&c.PodmanCommand, true)
return iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink())
}
// Run creates a container over varlink and then starts it
func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) {
- // FIXME
- // podman-remote run -it alpine ls DOES NOT WORK YET
- // podman-remote run -it alpine /bin/sh does, i suspect there is some sort of
- // timing issue between the socket availability and terminal setup and the command
- // being run.
-
// TODO the exit codes for run need to be figured out for remote connections
results := shared.NewIntermediateLayer(&c.PodmanCommand, true)
cid, err := iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink())
@@ -354,8 +387,7 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
fmt.Println(cid)
return 0, err
}
-
- errChan, err := r.attach(ctx, os.Stdin, os.Stdout, cid, true)
+ errChan, err := r.attach(ctx, os.Stdin, os.Stdout, cid, true, c.String("detach-keys"))
if err != nil {
return 0, err
}
@@ -367,7 +399,7 @@ func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
return 0, libpod.ErrNotImplemented
}
-// Ps ...
+// Ps lists containers based on criteria from the user
func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) {
var psContainers []shared.PsContainerOutput
last := int64(c.Last)
@@ -439,7 +471,7 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
return psContainers, nil
}
-func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool) (chan error, error) {
+func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool, detachKeys string) (chan error, error) {
var (
oldTermState *term.State
)
@@ -470,7 +502,7 @@ func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid s
}
// TODO add detach keys support
- _, err = iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, "", start)
+ _, err = iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, detachKeys, start)
if err != nil {
restoreTerminal(oldTermState)
return nil, err
@@ -531,7 +563,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if c.NoStdin {
inputStream = nil
}
- errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false)
+ errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false, c.DetachKeys)
if err != nil {
return err
}
@@ -609,3 +641,115 @@ func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.Contai
}
return lastError
}
+
+// Start starts an already created container
+func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) {
+ var (
+ finalErr error
+ exitCode = 125
+ )
+ // TODO Figure out how to deal with exit codes
+ inputStream := os.Stdin
+ if !c.Interactive {
+ inputStream = nil
+ }
+
+ containerIDs, err := iopodman.GetContainersByContext().Call(r.Conn, false, c.Latest, c.InputArgs)
+ if err != nil {
+ return exitCode, err
+ }
+ if len(containerIDs) < 1 {
+ return exitCode, errors.New("failed to find containers to start")
+ }
+	// start.go ensures that, when attaching, only one container can be specified
+ if c.Attach {
+ errChan, err := r.attach(ctx, inputStream, os.Stdout, containerIDs[0], true, c.DetachKeys)
+ if err != nil {
+			return exitCode, err
+ }
+ err = <-errChan
+ return 0, err
+ }
+
+ // TODO the notion of starting a pod container and its deps still needs to be worked through
+ // Everything else is detached
+ for _, cid := range containerIDs {
+ reply, err := iopodman.StartContainer().Call(r.Conn, cid)
+ if err != nil {
+ if finalErr != nil {
+ fmt.Println(err)
+ }
+ finalErr = err
+ } else {
+ fmt.Println(reply)
+ }
+ }
+ return exitCode, finalErr
+}
+
+// PauseContainers pauses container(s) based on CLI inputs.
+func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) {
+ var (
+ ok []string
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+
+ if cli.All {
+ filters := []string{libpod.ContainerStateRunning.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ } else {
+ ctrs, err = r.LookupContainers(cli.InputArgs)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+
+ for _, c := range ctrs {
+ c := c
+ err := c.Pause()
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
+
+// UnpauseContainers unpauses containers based on input
+func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ ctrs []*Container
+ err error
+ )
+
+ maxWorkers := shared.DefaultPoolSize("unpause")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+	logrus.Debugf("Setting maximum unpause workers to %d", maxWorkers)
+
+ if cli.All {
+ filters := []string{libpod.ContainerStatePaused.String()}
+ ctrs, err = r.LookupContainersWithStatus(filters)
+ } else {
+ ctrs, err = r.LookupContainers(cli.InputArgs)
+ }
+ if err != nil {
+ return ok, failures, err
+ }
+ for _, c := range ctrs {
+ c := c
+ err := c.Unpause()
+ if err != nil {
+ failures[c.ID()] = err
+ } else {
+ ok = append(ok, c.ID())
+ }
+ }
+ return ok, failures, nil
+}
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index 669971789..bb7d9cce6 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -4,20 +4,16 @@ package adapter
import (
"context"
- "github.com/pkg/errors"
"strings"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-// Pod ...
-type Pod struct {
- *libpod.Pod
-}
-
// PodContainerStats is a struct containing an adapter Pod and a libpod
// ContainerStats and is used primarily for outputting pod stats.
type PodContainerStats struct {
@@ -25,6 +21,49 @@ type PodContainerStats struct {
ContainerStats map[string]*libpod.ContainerStats
}
+// PrunePods removes stopped and exited pods; with force, running pods are removed as well
+func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ maxWorkers := shared.DefaultPoolSize("rm")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
+ states := []string{shared.PodStateStopped, shared.PodStateExited}
+ if cli.Force {
+ states = append(states, shared.PodStateRunning)
+ }
+
+ pods, err := r.GetPodsByStatus(states)
+ if err != nil {
+ return ok, failures, err
+ }
+ if len(pods) < 1 {
+ return ok, failures, nil
+ }
+
+ pool := shared.NewPool("pod_prune", maxWorkers, len(pods))
+ for _, p := range pods {
+ p := p
+
+		pool.Add(shared.Job{
+			ID: p.ID(),
+			Fn: func() error {
+ err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force)
+ if err != nil {
+ logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
// RemovePods ...
func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
var (
@@ -38,7 +77,7 @@ func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValue
}
for _, p := range pods {
- if err := r.RemovePod(ctx, p, cli.Force, cli.Force); err != nil {
+ if err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force); err != nil {
errs = append(errs, err)
} else {
podids = append(podids, p.ID())
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index 4a32607a2..7cf38aac0 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -14,13 +14,9 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-// Pod ...
-type Pod struct {
- remotepod
-}
-
// PodContainerStats is a struct containing an adapter Pod and a libpod
// ContainerStats and is used primarily for outputting pod stats.
type PodContainerStats struct {
@@ -28,13 +24,6 @@ type PodContainerStats struct {
ContainerStats map[string]*libpod.ContainerStats
}
-type remotepod struct {
- config *libpod.PodConfig
- state *libpod.PodInspectState
- containers []libpod.PodContainerInfo
- Runtime *LocalRuntime
-}
-
// RemovePods removes one or more based on the cli context.
func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
var (
@@ -214,6 +203,23 @@ func (r *LocalRuntime) GetAllPods() ([]*Pod, error) {
return pods, nil
}
+// GetPodsByStatus returns a slice of pods filtered by a libpod status
+func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) {
+ podIDs, err := iopodman.GetPodsByStatus().Call(r.Conn, statuses)
+ if err != nil {
+ return nil, err
+ }
+ pods := make([]*Pod, 0, len(podIDs))
+ for _, p := range podIDs {
+ pod, err := r.LookupPod(p)
+ if err != nil {
+ return nil, err
+ }
+ pods = append(pods, pod)
+ }
+ return pods, nil
+}
+
// ID returns the id of a remote pod
func (p *Pod) ID() string {
return p.config.ID
@@ -508,3 +514,48 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*libpod.ContainerSta
}
return newContainerStats, nil
}
+
+// RemovePod removes a pod
+// If removeCtrs is specified, containers will be removed
+// Otherwise, a pod that is not empty will return an error and not be removed
+// If force is specified with removeCtrs, all containers will be stopped before
+// being removed
+// Otherwise, the pod will not be removed if any containers are running
+func (r *LocalRuntime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
+ _, err := iopodman.RemovePod().Call(r.Conn, p.ID(), force)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// PrunePods removes stopped and exited pods; with force, running pods are removed as well
+func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+ states := []string{shared.PodStateStopped, shared.PodStateExited}
+ if cli.Force {
+ states = append(states, shared.PodStateRunning)
+ }
+
+ ids, err := iopodman.GetPodsByStatus().Call(r.Conn, states)
+ if err != nil {
+ return ok, failures, err
+ }
+ if len(ids) < 1 {
+ return ok, failures, nil
+ }
+
+ for _, id := range ids {
+ _, err := iopodman.RemovePod().Call(r.Conn, id, cli.Force)
+ if err != nil {
+ logrus.Debugf("Failed to remove pod %s: %s", id, err.Error())
+ failures[id] = err
+ } else {
+ ok = append(ok, id)
+ }
+ }
+ return ok, failures, nil
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index b5ec9f7a9..753f7c944 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -7,7 +7,6 @@ import (
"context"
"io"
"io/ioutil"
- "k8s.io/api/core/v1"
"os"
"text/template"
@@ -25,6 +24,7 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
+ "k8s.io/api/core/v1"
)
// LocalRuntime describes a typical libpod runtime
@@ -43,6 +43,11 @@ type Container struct {
*libpod.Container
}
+// Pod encapsulates the libpod.Pod structure, helps with remote vs. local
+type Pod struct {
+ *libpod.Pod
+}
+
// Volume ...
type Volume struct {
*libpod.Volume
@@ -369,3 +374,24 @@ func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Chang
func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) {
return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime)
}
+
+// GetPodsByStatus returns a slice of pods filtered by a libpod status
+func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) {
+
+ filterFunc := func(p *libpod.Pod) bool {
+ state, _ := shared.GetPodStatus(p)
+ for _, status := range statuses {
+ if state == status {
+ return true
+ }
+ }
+ return false
+ }
+
+ pods, err := r.Runtime.Pods(filterFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ return pods, nil
+}
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 71f7380db..dcb0924ce 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -99,6 +99,18 @@ type remoteContainer struct {
state *libpod.ContainerState
}
+// Pod encapsulates a remote pod, helps with remote vs. local
+type Pod struct {
+ remotepod
+}
+
+type remotepod struct {
+ config *libpod.PodConfig
+ state *libpod.PodInspectState
+ containers []libpod.PodContainerInfo
+ Runtime *LocalRuntime
+}
+
type VolumeFilter func(*Volume) bool
// Volume is embed for libpod volumes
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 9cb79ed4d..1d32b1adb 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -16,11 +16,13 @@
#include <sys/types.h>
#include <sys/prctl.h>
#include <dirent.h>
+#include <sys/select.h>
static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
-static int n_files;
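+/* Record which fds were already open when the process started; later cleanup
+   closes only those fds instead of blindly closing every descriptor up to the
+   maximum. */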
+static int open_files_max_fd;
+fd_set open_files_set;
static void __attribute__((constructor)) init()
{
@@ -32,11 +34,16 @@ static void __attribute__((constructor)) init()
{
struct dirent *ent;
+ FD_ZERO (&open_files_set);
for (ent = readdir (d); ent; ent = readdir (d))
{
int fd = atoi (ent->d_name);
- if (fd > n_files && fd != dirfd (d))
- n_files = fd;
+ if (fd != dirfd (d))
+ {
+ if (fd > open_files_max_fd)
+ open_files_max_fd = fd;
+ FD_SET (fd, &open_files_set);
+ }
}
closedir (d);
}
@@ -164,8 +171,11 @@ reexec_userns_join (int userns, int mountns)
{
/* We passed down these fds, close them. */
int f;
- for (f = 3; f < n_files; f++)
- close (f);
+ for (f = 3; f < open_files_max_fd; f++)
+ {
+ if (FD_ISSET (f, &open_files_set))
+ close (f);
+ }
return pid;
}
@@ -274,22 +284,25 @@ reexec_in_user_namespace (int ready)
check_proc_sys_userns_file (_max_user_namespaces);
check_proc_sys_userns_file (_unprivileged_user_namespaces);
}
- if (pid) {
- if (do_socket_activation) {
- long num_fds;
- num_fds = strtol(listen_fds, NULL, 10);
- if (num_fds != LONG_MIN && num_fds != LONG_MAX) {
- long i;
- for (i = 0; i < num_fds; i++) {
- close(3+i);
+ if (pid)
+ {
+ if (do_socket_activation)
+ {
+ long num_fds;
+ num_fds = strtol (listen_fds, NULL, 10);
+ if (num_fds != LONG_MIN && num_fds != LONG_MAX)
+ {
+ long i;
+ for (i = 3; i < num_fds + 3; i++)
+ if (FD_ISSET (i, &open_files_set))
+ close (i);
+ }
+ unsetenv ("LISTEN_PID");
+ unsetenv ("LISTEN_FDS");
+ unsetenv ("LISTEN_FDNAMES");
}
- }
- unsetenv("LISTEN_PID");
- unsetenv("LISTEN_FDS");
- unsetenv("LISTEN_FDNAMES");
+ return pid;
}
- return pid;
- }
argv = get_cmd_line_args (ppid);
if (argv == NULL)
@@ -300,8 +313,8 @@ reexec_in_user_namespace (int ready)
if (do_socket_activation) {
char s[32];
- sprintf(s, "%d", getpid());
- setenv("LISTEN_PID", s, true);
+ sprintf (s, "%d", getpid());
+ setenv ("LISTEN_PID", s, true);
}
setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1);
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index 17792ccfe..237407050 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -128,6 +128,37 @@ func (i *LibpodAPI) GetContainersByContext(call iopodman.VarlinkCall, all, lates
return call.ReplyGetContainersByContext(ids)
}
+// GetContainersByStatus returns a slice of containers filtered by a libpod status
+func (i *LibpodAPI) GetContainersByStatus(call iopodman.VarlinkCall, statuses []string) error {
+ var (
+ filterFuncs []libpod.ContainerFilter
+ containers []iopodman.Container
+ )
+ for _, status := range statuses {
+ lpstatus, err := libpod.StringToContainerStatus(status)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == lpstatus
+ })
+ }
+ filteredContainers, err := i.Runtime.GetContainers(filterFuncs...)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ opts := shared.PsOptions{Size: true, Namespace: true}
+ for _, ctr := range filteredContainers {
+ batchInfo, err := shared.BatchContainerOp(ctr, opts)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ containers = append(containers, makeListContainer(ctr.ID(), batchInfo))
+ }
+ return call.ReplyGetContainersByStatus(containers)
+}
+
// InspectContainer ...
func (i *LibpodAPI) InspectContainer(call iopodman.VarlinkCall, name string) error {
ctr, err := i.Runtime.LookupContainer(name)
diff --git a/pkg/varlinkapi/pods.go b/pkg/varlinkapi/pods.go
index ac8e24747..f34375bf5 100644
--- a/pkg/varlinkapi/pods.go
+++ b/pkg/varlinkapi/pods.go
@@ -101,6 +101,28 @@ func (i *LibpodAPI) GetPod(call iopodman.VarlinkCall, name string) error {
return call.ReplyGetPod(listPod)
}
+// GetPodsByStatus returns a slice of pods filtered by a libpod status
+func (i *LibpodAPI) GetPodsByStatus(call iopodman.VarlinkCall, statuses []string) error {
+ filterFuncs := func(p *libpod.Pod) bool {
+ state, _ := shared.GetPodStatus(p)
+ for _, status := range statuses {
+ if state == status {
+ return true
+ }
+ }
+ return false
+ }
+ filteredPods, err := i.Runtime.Pods(filterFuncs)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ podIDs := make([]string, 0, len(filteredPods))
+ for _, p := range filteredPods {
+ podIDs = append(podIDs, p.ID())
+ }
+ return call.ReplyGetPodsByStatus(podIDs)
+}
+
// InspectPod ...
func (i *LibpodAPI) InspectPod(call iopodman.VarlinkCall, name string) error {
pod, err := i.Runtime.LookupPod(name)
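
Note: the local GetPodsByStatus in pkg/adapter/runtime.go and the varlink GetPodsByStatus above share the same filtering shape: a predicate that matches a pod's state against any of the requested statuses. Below is a standalone sketch of that predicate pattern, with pod state reduced to a plain string for illustration; matchesAnyStatus is an illustrative name, not a podman function.

package main

import "fmt"

// matchesAnyStatus returns a predicate reporting whether a state string equals
// any of the requested statuses (the same OR semantics as the filters above).
func matchesAnyStatus(statuses []string) func(state string) bool {
	return func(state string) bool {
		for _, s := range statuses {
			if state == s {
				return true
			}
		}
		return false
	}
}

func main() {
	filter := matchesAnyStatus([]string{"Stopped", "Exited"})
	for _, state := range []string{"Running", "Exited", "Stopped"} {
		fmt.Printf("%s matches: %v\n", state, filter(state))
	}
}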