-rw-r--r--cmd/podman/commands.go3
-rw-r--r--cmd/podman/kill.go21
-rw-r--r--cmd/podman/main.go3
-rw-r--r--cmd/podman/ps.go187
-rw-r--r--cmd/podman/rm.go72
-rw-r--r--cmd/podman/shared/container.go171
-rw-r--r--cmd/podman/shared/workers.go133
-rw-r--r--cmd/podman/stop.go21
-rw-r--r--cmd/podman/umount.go47
-rw-r--r--cmd/podman/utils.go49
-rw-r--r--cmd/podman/varlink/io.podman.varlink43
-rw-r--r--cmd/podman/wait.go20
-rw-r--r--libpod/container.go23
-rw-r--r--libpod/container_api.go4
-rw-r--r--libpod/container_internal.go6
-rw-r--r--libpod/container_internal_linux.go4
-rw-r--r--libpod/oci.go1
-rw-r--r--pkg/adapter/containers.go137
-rw-r--r--pkg/adapter/containers_remote.go131
-rw-r--r--pkg/adapter/runtime.go65
-rw-r--r--pkg/adapter/shortcuts/shortcuts.go39
-rw-r--r--pkg/varlinkapi/containers.go49
-rw-r--r--pkg/varlinkapi/util.go33
-rw-r--r--test/e2e/common_test.go30
-rw-r--r--test/e2e/libpod_suite_test.go21
-rw-r--r--test/e2e/search_test.go145
-rw-r--r--test/utils/utils.go2
27 files changed, 1002 insertions, 458 deletions
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index e9afcbc06..7c660f7cb 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -16,7 +16,6 @@ func getMainCommands() []*cobra.Command {
_execCommand,
_generateCommand,
_playCommand,
- &_psCommand,
_loginCommand,
_logoutCommand,
_mountCommand,
@@ -24,12 +23,10 @@ func getMainCommands() []*cobra.Command {
_portCommand,
_refreshCommand,
_restartCommand,
- _rmCommand,
_searchCommand,
_startCommand,
_statsCommand,
_topCommand,
- _umountCommand,
_unpauseCommand,
}
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index 6019fbfec..20142e0bf 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -1,9 +1,6 @@
package main
import (
- "fmt"
- "reflect"
-
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
"github.com/docker/docker/pkg/signal"
@@ -71,21 +68,5 @@ func killCmd(c *cliconfig.KillValues) error {
if err != nil {
return err
}
-
- for _, id := range ok {
- fmt.Println(id)
- }
-
- if len(failures) > 0 {
- keys := reflect.ValueOf(failures).MapKeys()
- lastKey := keys[len(keys)-1].String()
- lastErr := failures[lastKey]
- delete(failures, lastKey)
-
- for _, err := range failures {
- outputError(err)
- }
- return lastErr
- }
- return nil
+ return printCmdResults(ok, failures)
}
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index b44cf9f0a..7c765a0e0 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -52,13 +52,16 @@ var mainCommands = []*cobra.Command{
_loadCommand,
_logsCommand,
podCommand.Command,
+ &_psCommand,
_pullCommand,
_pushCommand,
+ _rmCommand,
&_rmiCommand,
_runCommand,
_saveCommand,
_stopCommand,
_tagCommand,
+ _umountCommand,
_versionCommand,
_waitCommand,
imageCommand.Command,
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 759a03b86..5bb88f227 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -6,7 +6,6 @@ import (
"os"
"reflect"
"sort"
- "strconv"
"strings"
"text/tabwriter"
"time"
@@ -14,15 +13,12 @@ import (
tm "github.com/buger/goterm"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/fields"
)
@@ -205,10 +201,6 @@ func psCmd(c *cliconfig.PsValues) error {
span, _ := opentracing.StartSpanFromContext(Ctx, "psCmd")
defer span.Finish()
}
- // TODO disable when single rootless userns merges
- if c.Bool("size") && os.Geteuid() != 0 {
- return errors.New("the --size option is not presently supported without root")
- }
var watch bool
@@ -224,7 +216,7 @@ func psCmd(c *cliconfig.PsValues) error {
return errors.Wrapf(err, "error with flags passed")
}
- runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
@@ -279,128 +271,6 @@ func checkFlagsPassed(c *cliconfig.PsValues) error {
return nil
}
-func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Runtime) (func(container *libpod.Container) bool, error) {
- switch filter {
- case "id":
- return func(c *libpod.Container) bool {
- return strings.Contains(c.ID(), filterValue)
- }, nil
- case "label":
- var filterArray []string = strings.SplitN(filterValue, "=", 2)
- var filterKey string = filterArray[0]
- if len(filterArray) > 1 {
- filterValue = filterArray[1]
- } else {
- filterValue = ""
- }
- return func(c *libpod.Container) bool {
- for labelKey, labelValue := range c.Labels() {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
- return true
- }
- }
- return false
- }, nil
- case "name":
- return func(c *libpod.Container) bool {
- return strings.Contains(c.Name(), filterValue)
- }, nil
- case "exited":
- exitCode, err := strconv.ParseInt(filterValue, 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "exited code out of range %q", filterValue)
- }
- return func(c *libpod.Container) bool {
- ec, exited, err := c.ExitCode()
- if ec == int32(exitCode) && err == nil && exited == true {
- return true
- }
- return false
- }, nil
- case "status":
- if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
- return nil, errors.Errorf("%s is not a valid status", filterValue)
- }
- return func(c *libpod.Container) bool {
- status, err := c.State()
- if err != nil {
- return false
- }
- if filterValue == "stopped" {
- filterValue = "exited"
- }
- state := status.String()
- if status == libpod.ContainerStateConfigured {
- state = "created"
- } else if status == libpod.ContainerStateStopped {
- state = "exited"
- }
- return state == filterValue
- }, nil
- case "ancestor":
- // This needs to refine to match docker
- // - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant.
- return func(c *libpod.Container) bool {
- containerConfig := c.Config()
- if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
- return true
- }
- return false
- }, nil
- case "before":
- ctr, err := runtime.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
- }
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
- return func(c *libpod.Container) bool {
- cc := c.Config()
- return createTime.After(cc.CreatedTime)
- }, nil
- case "since":
- ctr, err := runtime.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
- }
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
- return func(c *libpod.Container) bool {
- cc := c.Config()
- return createTime.Before(cc.CreatedTime)
- }, nil
- case "volume":
- //- volume=(<volume-name>|<mount-point-destination>)
- return func(c *libpod.Container) bool {
- containerConfig := c.Config()
- var dest string
- arr := strings.Split(filterValue, ":")
- source := arr[0]
- if len(arr) == 2 {
- dest = arr[1]
- }
- for _, mount := range containerConfig.Spec.Mounts {
- if dest != "" && (mount.Source == source && mount.Destination == dest) {
- return true
- }
- if dest == "" && mount.Source == source {
- return true
- }
- }
- return false
- }, nil
- case "health":
- return func(c *libpod.Container) bool {
- hcStatus, err := c.HealthCheckStatus()
- if err != nil {
- return false
- }
- return hcStatus == filterValue
- }, nil
- }
- return nil, errors.Errorf("%s is an invalid filter", filter)
-}
-
// generate the accurate header based on template given
func (p *psTemplateParams) headerMap() map[string]string {
v := reflect.Indirect(reflect.ValueOf(p))
@@ -550,11 +420,9 @@ func dumpJSON(containers []shared.PsContainerOutput) error {
return nil
}
-func psDisplay(c *cliconfig.PsValues, runtime *libpod.Runtime) error {
+func psDisplay(c *cliconfig.PsValues, runtime *adapter.LocalRuntime) error {
var (
- filterFuncs []libpod.ContainerFilter
- outputContainers []*libpod.Container
- err error
+ err error
)
opts := shared.PsOptions{
All: c.All,
@@ -570,51 +438,8 @@ func psDisplay(c *cliconfig.PsValues, runtime *libpod.Runtime) error {
Sync: c.Sync,
}
- maxWorkers := shared.Parallelize("ps")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalFlags.MaxWorks
- }
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
- filters := c.Filter
- if len(filters) > 0 {
- for _, f := range filters {
- filterSplit := strings.SplitN(f, "=", 2)
- if len(filterSplit) < 2 {
- return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
- }
- generatedFunc, err := generateContainerFilterFuncs(filterSplit[0], filterSplit[1], runtime)
- if err != nil {
- return errors.Wrapf(err, "invalid filter")
- }
- filterFuncs = append(filterFuncs, generatedFunc)
- }
- }
- if !opts.Latest {
- // Get all containers
- containers, err := runtime.GetContainers(filterFuncs...)
- if err != nil {
- return err
- }
-
- // We only want the last few containers
- if opts.Last > 0 && opts.Last <= len(containers) {
- return errors.Errorf("--last not yet supported")
- } else {
- outputContainers = containers
- }
- } else {
- // Get just the latest container
- // Ignore filters
- latestCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return err
- }
-
- outputContainers = []*libpod.Container{latestCtr}
- }
-
- pss := shared.PBatch(outputContainers, maxWorkers, opts)
+ pss, err := runtime.Ps(c, opts)
+ if err != nil {
+ return err
+ }
if opts.Sort != "" {
pss, err = sortPsOutput(opts.Sort, pss)
if err != nil {
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 52e281402..66f70a36f 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -4,12 +4,9 @@ import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -48,78 +45,29 @@ func init() {
markFlagHiddenForRemoteClient("latest", flags)
}
-// saveCmd saves the image to either docker-archive or oci
+// rmCmd removes one or more containers
func rmCmd(c *cliconfig.RmValues) error {
- var (
- deleteFuncs []shared.ParallelWorkerInput
- )
-
- ctx := getContext()
- runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
- failureCnt := 0
- delContainers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
+ ok, failures, err := runtime.RemoveContainers(getContext(), c)
if err != nil {
- if c.Force && len(c.InputArgs) > 0 {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
- err = nil
+ if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ if len(c.InputArgs) > 1 {
+ exitCode = 125
} else {
- failureCnt++
- }
- runtime.RemoveContainersFromStorage(c.InputArgs)
- }
- if len(delContainers) == 0 {
- if err != nil && failureCnt == 0 {
exitCode = 1
}
- return err
- }
- if err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
- exitCode = 1
- }
- fmt.Println(err.Error())
- }
- }
-
- for _, container := range delContainers {
- con := container
- f := func() error {
- return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
- }
-
- deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
- ContainerID: con.ID(),
- ParallelFunc: f,
- })
- }
- maxWorkers := shared.Parallelize("rm")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalFlags.MaxWorks
- }
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
- // Run the parallel funcs
- deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
- err = printParallelOutput(deleteErrors, errCount)
- if err != nil {
- for _, result := range deleteErrors {
- if result != nil && errors.Cause(result) != image.ErrNoSuchCtr {
- failureCnt++
- }
- }
- if failureCnt == 0 {
- exitCode = 1
}
+ return err
}
- if failureCnt > 0 {
+ if len(failures) > 0 {
exitCode = 125
}
- return err
+ return printCmdResults(ok, failures)
}
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index 6826191c5..7bef62355 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -44,7 +44,6 @@ type PsOptions struct {
Quiet bool
Size bool
Sort string
- Label string
Namespace bool
Sync bool
}
@@ -274,6 +273,176 @@ func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContai
}
}
+func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
+ switch filter {
+ case "id":
+ return func(c *libpod.Container) bool {
+ return strings.Contains(c.ID(), filterValue)
+ }, nil
+ case "label":
+ var filterArray []string = strings.SplitN(filterValue, "=", 2)
+ var filterKey string = filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ return func(c *libpod.Container) bool {
+ for labelKey, labelValue := range c.Labels() {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ return true
+ }
+ }
+ return false
+ }, nil
+ case "name":
+ return func(c *libpod.Container) bool {
+ return strings.Contains(c.Name(), filterValue)
+ }, nil
+ case "exited":
+ exitCode, err := strconv.ParseInt(filterValue, 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "exited code out of range %q", filterValue)
+ }
+ return func(c *libpod.Container) bool {
+ ec, exited, err := c.ExitCode()
+ if ec == int32(exitCode) && err == nil && exited == true {
+ return true
+ }
+ return false
+ }, nil
+ case "status":
+ if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
+ return nil, errors.Errorf("%s is not a valid status", filterValue)
+ }
+ return func(c *libpod.Container) bool {
+ status, err := c.State()
+ if err != nil {
+ return false
+ }
+ if filterValue == "stopped" {
+ filterValue = "exited"
+ }
+ state := status.String()
+ if status == libpod.ContainerStateConfigured {
+ state = "created"
+ } else if status == libpod.ContainerStateStopped {
+ state = "exited"
+ }
+ return state == filterValue
+ }, nil
+ case "ancestor":
+ // This needs to be refined to match docker
+ // - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant.
+ return func(c *libpod.Container) bool {
+ containerConfig := c.Config()
+ if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
+ return true
+ }
+ return false
+ }, nil
+ case "before":
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ }
+ containerConfig := ctr.Config()
+ createTime := containerConfig.CreatedTime
+ return func(c *libpod.Container) bool {
+ cc := c.Config()
+ return createTime.After(cc.CreatedTime)
+ }, nil
+ case "since":
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ }
+ containerConfig := ctr.Config()
+ createTime := containerConfig.CreatedTime
+ return func(c *libpod.Container) bool {
+ cc := c.Config()
+ return createTime.Before(cc.CreatedTime)
+ }, nil
+ case "volume":
+ //- volume=(<volume-name>|<mount-point-destination>)
+ return func(c *libpod.Container) bool {
+ containerConfig := c.Config()
+ var dest string
+ arr := strings.Split(filterValue, ":")
+ source := arr[0]
+ if len(arr) == 2 {
+ dest = arr[1]
+ }
+ for _, mount := range containerConfig.Spec.Mounts {
+ if dest != "" && (mount.Source == source && mount.Destination == dest) {
+ return true
+ }
+ if dest == "" && mount.Source == source {
+ return true
+ }
+ }
+ return false
+ }, nil
+ case "health":
+ return func(c *libpod.Container) bool {
+ hcStatus, err := c.HealthCheckStatus()
+ if err != nil {
+ return false
+ }
+ return hcStatus == filterValue
+ }, nil
+ }
+ return nil, errors.Errorf("%s is an invalid filter", filter)
+}
+
+// GetPsContainerOutput returns a slice of containers specifically for ps output
+func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) {
+ var (
+ filterFuncs []libpod.ContainerFilter
+ outputContainers []*libpod.Container
+ )
+
+ if len(filters) > 0 {
+ for _, f := range filters {
+ filterSplit := strings.SplitN(f, "=", 2)
+ if len(filterSplit) < 2 {
+ return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
+ }
+ generatedFunc, err := generateContainerFilterFuncs(filterSplit[0], filterSplit[1], r)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid filter")
+ }
+ filterFuncs = append(filterFuncs, generatedFunc)
+ }
+ }
+ if !opts.Latest {
+ // Get all containers
+ containers, err := r.GetContainers(filterFuncs...)
+ if err != nil {
+ return nil, err
+ }
+
+ // We only want the last few containers
+ if opts.Last > 0 && opts.Last <= len(containers) {
+ return nil, errors.Errorf("--last not yet supported")
+ } else {
+ outputContainers = containers
+ }
+ } else {
+ // Get just the latest container
+ // Ignore filters
+ latestCtr, err := r.GetLatestContainer()
+ if err != nil {
+ return nil, err
+ }
+
+ outputContainers = []*libpod.Container{latestCtr}
+ }
+
+ pss := PBatch(outputContainers, maxWorkers, opts)
+ return pss, nil
+}
+
// PBatch performs batch operations on containers in parallel. It spawns a number of workers
// relative to the number of parallel operations desired.
func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
diff --git a/cmd/podman/shared/workers.go b/cmd/podman/shared/workers.go
new file mode 100644
index 000000000..112af89cc
--- /dev/null
+++ b/cmd/podman/shared/workers.go
@@ -0,0 +1,133 @@
+package shared
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/sirupsen/logrus"
+)
+
+// JobFunc provides the function signature for the pooled functions
+type JobFunc func() error
+
+// Job defines the function to run
+type Job struct {
+ ID string
+ Fn JobFunc
+}
+
+// JobResult pairs a Job with the error returned when it ran
+type JobResult struct {
+ Job Job
+ Err error
+}
+
+// Pool defines the worker pool and queues
+type Pool struct {
+ id string
+ wg *sync.WaitGroup
+ jobs chan Job
+ results chan JobResult
+ size int
+ capacity int
+}
+
+// NewPool creates and initializes a new Pool
+func NewPool(id string, size int, capacity int) *Pool {
+ var wg sync.WaitGroup
+
+ // min for int...
+ s := size
+ if s > capacity {
+ s = capacity
+ }
+
+ return &Pool{
+ id,
+ &wg,
+ make(chan Job, capacity),
+ make(chan JobResult, capacity),
+ s,
+ capacity,
+ }
+}
+
+// Add Job to pool for parallel processing
+func (p *Pool) Add(job Job) {
+ p.wg.Add(1)
+ p.jobs <- job
+}
+
+// Run the Jobs in the pool, gather and return the results
+func (p *Pool) Run() ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ for w := 0; w < p.size; w++ {
+ w := w
+ go p.newWorker(w)
+ }
+ close(p.jobs)
+ p.wg.Wait()
+
+ close(p.results)
+ for r := range p.results {
+ if r.Err == nil {
+ ok = append(ok, r.Job.ID)
+ } else {
+ failures[r.Job.ID] = r.Err
+ }
+ }
+
+ if logrus.GetLevel() == logrus.DebugLevel {
+ for i, f := range failures {
+ logrus.Debugf("Pool[%s, %s: %s]", p.id, i, f.Error())
+ }
+ }
+
+ return ok, failures, nil
+}
+
+// newWorker processes jobs from the Pool's jobs channel until the channel is closed
+func (p *Pool) newWorker(slot int) {
+ for job := range p.jobs {
+ err := job.Fn()
+ p.results <- JobResult{job, err}
+ if logrus.GetLevel() == logrus.DebugLevel {
+ n := strings.Split(runtime.FuncForPC(reflect.ValueOf(job.Fn).Pointer()).Name(), ".")
+ logrus.Debugf("Worker#%d finished job %s/%s (%v)", slot, n[2:], job.ID, err)
+ }
+ p.wg.Done()
+ }
+}
+
+// DefaultPoolSize provides the maximum number of parallel workers (int) as calculated by a basic
+// heuristic. This can be overridden by the --max-workers primary switch to podman.
+func DefaultPoolSize(name string) int {
+ numCpus := runtime.NumCPU()
+ switch name {
+ case "kill", "pause", "rm", "unpause":
+ if numCpus <= 3 {
+ return numCpus * 3
+ }
+ return numCpus * 4
+ case "ps":
+ return 8
+ case "restart":
+ return numCpus * 2
+ case "stop":
+ if numCpus <= 2 {
+ return 4
+ } else {
+ return numCpus * 3
+ }
+ }
+ return 3
+}
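
The Pool above replaces the ParallelWorkerInput machinery that rm.go and ps.go delete in this patch. A minimal sketch of how a caller drives it, modeled on the StopContainers adapter later in this diff (the container slice, timeout, and stop call are placeholders):

    pool := shared.NewPool("stop", shared.DefaultPoolSize("stop"), len(ctrs))
    for _, c := range ctrs {
        c := c // capture the loop variable for the closure
        pool.Add(shared.Job{
            ID: c.ID(),
            Fn: func() error { return c.StopWithTimeout(10) },
        })
    }
    ok, failures, err := pool.Run() // ok: IDs that succeeded; failures: ID -> error

Every job must be queued before Run is called: Run closes the jobs channel right after starting the workers, and the channel capacity passed to NewPool (len(ctrs) here) is what lets Add enqueue without blocking.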
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index e27be64f6..38d90fe81 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -1,9 +1,6 @@
package main
import (
- "fmt"
- "reflect"
-
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
@@ -68,21 +65,5 @@ func stopCmd(c *cliconfig.StopValues) error {
if err != nil {
return err
}
-
- for _, id := range ok {
- fmt.Println(id)
- }
-
- if len(failures) > 0 {
- keys := reflect.ValueOf(failures).MapKeys()
- lastKey := keys[len(keys)-1].String()
- lastErr := failures[lastKey]
- delete(failures, lastKey)
-
- for _, err := range failures {
- outputError(err)
- }
- return lastErr
- }
- return nil
+ return printCmdResults(ok, failures)
}
diff --git a/cmd/podman/umount.go b/cmd/podman/umount.go
index a938c7c38..914e37cfa 100644
--- a/cmd/podman/umount.go
+++ b/cmd/podman/umount.go
@@ -1,20 +1,16 @@
package main
import (
- "fmt"
-
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
- "github.com/containers/storage"
+ "github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
umountCommand cliconfig.UmountValues
- description = `Container storage increments a mount counter each time a container is mounted.
+
+ description = `Container storage increments a mount counter each time a container is mounted.
When a container is unmounted, the mount counter is decremented. The container's root filesystem is physically unmounted only when the mount counter reaches zero indicating no other processes are using the mount.
@@ -51,42 +47,15 @@ func init() {
}
func umountCmd(c *cliconfig.UmountValues) error {
- runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+ runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
- return errors.Wrapf(err, "could not get runtime")
+ return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
- force := c.Force
- umountAll := c.All
-
- containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
+ ok, failures, err := runtime.UmountRootFilesystems(getContext(), c)
if err != nil {
- if len(containers) == 0 {
- return err
- }
- fmt.Println(err.Error())
- }
-
- umountContainerErrStr := "error unmounting container"
- var lastError error
- for _, ctr := range containers {
- ctrState, err := ctr.State()
- if ctrState == libpod.ContainerStateRunning || err != nil {
- continue
- }
-
- if err = ctr.Unmount(force); err != nil {
- if umountAll && errors.Cause(err) == storage.ErrLayerNotMounted {
- continue
- }
- if lastError != nil {
- logrus.Error(lastError)
- }
- lastError = errors.Wrapf(err, "%s %s", umountContainerErrStr, ctr.ID())
- continue
- }
- fmt.Printf("%s\n", ctr.ID())
+ return err
}
- return lastError
+ return printCmdResults(ok, failures)
}
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index c763940db..81bd02faa 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -2,11 +2,12 @@ package main
import (
"fmt"
+ "reflect"
"github.com/spf13/pflag"
)
-//printParallelOutput takes the map of parallel worker results and outputs them
+// printParallelOutput takes the map of parallel worker results and outputs them
// to stdout
func printParallelOutput(m map[string]error, errCount int) error {
var lastError error
@@ -23,6 +24,26 @@ func printParallelOutput(m map[string]error, errCount int) error {
return lastError
}
+// print results from CLI command
+func printCmdResults(ok []string, failures map[string]error) error {
+ for _, id := range ok {
+ fmt.Println(id)
+ }
+
+ if len(failures) > 0 {
+ keys := reflect.ValueOf(failures).MapKeys()
+ lastKey := keys[len(keys)-1].String()
+ lastErr := failures[lastKey]
+ delete(failures, lastKey)
+
+ for _, err := range failures {
+ outputError(err)
+ }
+ return lastErr
+ }
+ return nil
+}
+
// markFlagHiddenForRemoteClient makes the flag not appear as part of the CLI
// on the remote-client
func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
@@ -30,3 +51,29 @@ func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
flags.MarkHidden(flagName)
}
}
+
+// TODO: remove when adapter package takes over this functionality
+// func joinContainerOrCreateRootlessUserNS(runtime *libpod.Runtime, ctr *libpod.Container) (bool, int, error) {
+// if os.Geteuid() == 0 {
+// return false, 0, nil
+// }
+// s, err := ctr.State()
+// if err != nil {
+// return false, -1, err
+// }
+// opts := rootless.Opts{
+// Argument: ctr.ID(),
+// }
+// if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
+// data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
+// }
+// conmonPid, err := strconv.Atoi(string(data))
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "cannot parse PID %q", data)
+// }
+// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
+// }
+// return rootless.BecomeRootInUserNSWithOpts(&opts)
+// }
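
printCmdResults centralizes the output convention that kill, stop, wait, rm, and umount now share: each successful ID is printed to stdout, all but one failure go through outputError, and the remaining failure becomes the command's return value. A rough sketch of the behavior, with illustrative IDs and errors:

    ok := []string{"ctr1", "ctr2"}
    failures := map[string]error{"ctr3": errors.New("container is paused")}
    // prints "ctr1" and "ctr2", then returns the ctr3 error; with several
    // failures, which error is returned is effectively arbitrary, since Go
    // map iteration order is unspecified
    err := printCmdResults(ok, failures)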
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index 9098a9297..d8905326c 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -133,6 +133,47 @@ type ContainerStats (
pids: int
)
+type PsOpts (
+ all: bool,
+ filters: ?[]string,
+ last: ?int,
+ latest: ?bool,
+ noTrunc: ?bool,
+ pod: ?bool,
+ quiet: ?bool,
+ sort: ?string,
+ sync: ?bool
+)
+
+type PsContainer (
+ id: string,
+ image: string,
+ command: string,
+ created: string,
+ ports: string,
+ names: string,
+ isInfra: bool,
+ status: string,
+ state: string,
+ pidNum: int,
+ rootFsSize: int,
+ rwSize: int,
+ pod: string,
+ createdAt: string,
+ exitedAt: string,
+ startedAt: string,
+ labels: [string]string,
+ nsPid: string,
+ cgroup: string,
+ ipc: string,
+ mnt: string,
+ net: string,
+ pidNs: string,
+ user: string,
+ uts: string,
+ mounts: string
+)
+
# ContainerMount describes the struct for mounts in a container
type ContainerMount (
destination: string,
@@ -474,6 +515,8 @@ method GetInfo() -> (info: PodmanInfo)
# See also [GetContainer](#GetContainer).
method ListContainers() -> (containers: []Container)
+method Ps(opts: PsOpts) -> (containers: []PsContainer)
+
# GetContainer returns information about a single container. If a container
# with the given id doesn't exist, a [ContainerNotFound](#ContainerNotFound)
# error will be returned. See also [ListContainers](ListContainers) and
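
In the varlink IDL a leading ? marks a field as optional, and the generated Go bindings surface optional fields as pointers; this is why the remote adapter later in this diff builds PsOpts from the addresses of the CLI values. A hedged sketch of a standalone client call, with the varlink connection setup elided:

    latest := false
    opts := iopodman.PsOpts{
        All:    true,
        Latest: &latest, // optional (?) fields become pointers in the bindings
    }
    containers, err := iopodman.Ps().Call(conn, opts)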
diff --git a/cmd/podman/wait.go b/cmd/podman/wait.go
index 4449898a0..827ac6826 100644
--- a/cmd/podman/wait.go
+++ b/cmd/podman/wait.go
@@ -1,8 +1,6 @@
package main
import (
- "fmt"
- "reflect"
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
@@ -62,21 +60,5 @@ func waitCmd(c *cliconfig.WaitValues) error {
if err != nil {
return err
}
-
- for _, id := range ok {
- fmt.Println(id)
- }
-
- if len(failures) > 0 {
- keys := reflect.ValueOf(failures).MapKeys()
- lastKey := keys[len(keys)-1].String()
- lastErr := failures[lastKey]
- delete(failures, lastKey)
-
- for _, err := range failures {
- outputError(err)
- }
- return lastErr
- }
- return nil
+ return printCmdResults(ok, failures)
}
diff --git a/libpod/container.go b/libpod/container.go
index 6d5e063ab..523e571b1 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -401,6 +401,29 @@ func (t ContainerStatus) String() string {
return "bad state"
}
+// StringToContainerStatus converts a string representation of a container's
+// status into an actual ContainerStatus type
+func StringToContainerStatus(status string) (ContainerStatus, error) {
+ switch status {
+ case ContainerStateUnknown.String():
+ return ContainerStateUnknown, nil
+ case ContainerStateConfigured.String():
+ return ContainerStateConfigured, nil
+ case ContainerStateCreated.String():
+ return ContainerStateCreated, nil
+ case ContainerStateRunning.String():
+ return ContainerStateRunning, nil
+ case ContainerStateStopped.String():
+ return ContainerStateStopped, nil
+ case ContainerStatePaused.String():
+ return ContainerStatePaused, nil
+ case ContainerStateExited.String():
+ return ContainerStateExited, nil
+ default:
+ return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
+ }
+}
+
// Config accessors
// Unlocked
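
StringToContainerStatus is the inverse of ContainerStatus.String(); it exists so the remote client (containers_remote.go below) can rehydrate the state string it receives over varlink. A quick illustration:

    state, err := libpod.StringToContainerStatus("running")
    // state == libpod.ContainerStateRunning, err == nil; an unrecognized
    // string yields ContainerStateUnknown and an ErrInvalidArg-wrapped error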
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 2a2381923..465b23831 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -15,7 +15,7 @@ import (
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/oci/caps"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
@@ -174,7 +174,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateUnknown ||
c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers")
+ return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String())
}
if c.state.State == ContainerStateStopped ||
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 22df36c11..3c7319963 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -948,7 +948,7 @@ func (c *Container) start() error {
// Internal, non-locking function to stop container
func (c *Container) stop(timeout uint) error {
- logrus.Debugf("Stopping ctr %s with timeout %d", c.ID(), timeout)
+ logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout)
if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil {
return err
@@ -1064,14 +1064,16 @@ func (c *Container) mountStorage() (string, error) {
func (c *Container) cleanupStorage() error {
if !c.state.Mounted {
// Already unmounted, do nothing
- logrus.Debugf("Storage is already unmounted, skipping...")
+ logrus.Debugf("Container %s storage is already unmounted, skipping...", c.ID())
return nil
}
+
for _, mount := range c.config.Mounts {
if err := c.unmountSHM(mount); err != nil {
return err
}
}
+
if c.config.Rootfs != "" {
return nil
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 4d6bf61a3..eeffa4705 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -30,7 +30,7 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -48,6 +48,8 @@ func (c *Container) unmountSHM(mount string) error {
if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {
if err != syscall.EINVAL {
logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err)
+ } else {
+ logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
}
}
return nil
diff --git a/libpod/oci.go b/libpod/oci.go
index 62331b879..189359753 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -143,6 +143,7 @@ func waitContainerStop(ctr *Container, timeout time.Duration) error {
return nil
case <-time.After(timeout):
close(chControl)
+ logrus.Debugf("container %s did not die within timeout %v", ctr.ID(), timeout)
return errors.Errorf("container %s did not die within timeout", ctr.ID())
}
}
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 1bca99cec..8ce506542 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -62,52 +63,144 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
timeout = &t
}
- var (
- ok = []string{}
- failures = map[string]error{}
- )
+ maxWorkers := shared.DefaultPoolSize("stop")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum stop workers to %d", maxWorkers)
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
- return ok, failures, err
+ return nil, nil, err
}
+ pool := shared.NewPool("stop", maxWorkers, len(ctrs))
for _, c := range ctrs {
+ c := c
+
if timeout == nil {
t := c.StopTimeout()
timeout = &t
logrus.Debugf("Set timeout to container %s default (%d)", c.ID(), *timeout)
}
- if err := c.StopWithTimeout(*timeout); err == nil {
- ok = append(ok, c.ID())
- } else if errors.Cause(err) == libpod.ErrCtrStopped {
- ok = append(ok, c.ID())
- logrus.Debugf("Container %s is already stopped", c.ID())
- } else {
- failures[c.ID()] = err
- }
+
+ pool.Add(shared.Job{
+ c.ID(),
+ func() error {
+ err := c.StopWithTimeout(*timeout)
+ if err != nil {
+ if errors.Cause(err) == libpod.ErrCtrStopped {
+ logrus.Debugf("Container %s is already stopped", c.ID())
+ return nil
+ }
+ logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error())
+ }
+ return err
+ },
+ })
}
- return ok, failures, nil
+ return pool.Run()
}
// KillContainers sends signal to container(s) based on CLI inputs.
// Returns list of successful id(s), map of failed id(s) + error, or error not from container
func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
+ maxWorkers := shared.DefaultPoolSize("kill")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum kill workers to %d", maxWorkers)
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pool := shared.NewPool("kill", maxWorkers, len(ctrs))
+ for _, c := range ctrs {
+ c := c
+
+ pool.Add(shared.Job{
+ c.ID(),
+ func() error {
+ return c.Kill(uint(signal))
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// RemoveContainers removes container(s) based on CLI inputs.
+func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
var (
ok = []string{}
failures = map[string]error{}
)
+ maxWorkers := shared.DefaultPoolSize("rm")
+ if cli.GlobalIsSet("max-workers") {
+ maxWorkers = cli.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
+ // Force may be used to remove containers no longer found in the database
+ if cli.Force && len(cli.InputArgs) > 0 && errors.Cause(err) == libpod.ErrNoSuchCtr {
+ r.RemoveContainersFromStorage(cli.InputArgs)
+ }
return ok, failures, err
}
+ pool := shared.NewPool("rm", maxWorkers, len(ctrs))
for _, c := range ctrs {
- if err := c.Kill(uint(signal)); err == nil {
- ok = append(ok, c.ID())
+ c := c
+
+ pool.Add(shared.Job{
+ c.ID(),
+ func() error {
+ err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
+ if err != nil {
+ logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
+ }
+ return err
+ },
+ })
+ }
+ return pool.Run()
+}
+
+// UmountRootFilesystems umounts container(s) root filesystems based on CLI inputs.
+func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ return ok, failures, err
+ }
+
+ for _, ctr := range ctrs {
+ state, err := ctr.State()
+ if err != nil {
+ logrus.Debugf("Error getting container %s state: %s", ctr.ID(), err.Error())
+ continue
+ }
+ if state == libpod.ContainerStateRunning {
+ logrus.Debugf("Cannot umount container %s: it is running", ctr.ID())
+ continue
+ }
+
+ if err := ctr.Unmount(cli.Force); err != nil {
+ if cli.All && errors.Cause(err) == storage.ErrLayerNotMounted {
+ logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
+ continue
+ }
+ failures[ctr.ID()] = errors.Wrapf(err, "error unmounting container %s", ctr.ID())
} else {
- failures[c.ID()] = err
+ ok = append(ok, ctr.ID())
}
}
return ok, failures, nil
@@ -304,3 +397,13 @@ func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
return exitCode, nil
}
+
+// Ps returns a slice of containers suitable for ps output
+func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) {
+ maxWorkers := shared.Parallelize("ps")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalFlags.MaxWorks
+ }
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+ return shared.GetPsContainerOutput(r.Runtime, opts, c.Filter, maxWorkers)
+}
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 3730827c7..424c431df 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -12,11 +12,12 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/inspect"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+
+ iopodman "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/inspect"
"github.com/varlink/go/varlink"
)
@@ -128,7 +129,7 @@ func (c *Container) Name() string {
return c.config.Name
}
-// StopContainers stops requested containers using CLI inputs.
+// StopContainers stops requested containers using varlink.
// Returns the list of stopped container ids, map of failed to stop container ids + errors, or any non-container error
func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) {
var (
@@ -152,7 +153,7 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
return ok, failures, nil
}
-// KillContainers sends signal to container(s) based on CLI inputs.
+// KillContainers sends signal to container(s) based on varlink.
// Returns list of successful id(s), map of failed id(s) + error, or error not from container
func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
var (
@@ -176,6 +177,52 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
return ok, failures, nil
}
+// RemoveContainers removes container(s) based on varlink inputs.
+func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ for _, id := range ids {
+ _, err := iopodman.RemoveContainer().Call(r.Conn, id, cli.Force, cli.Volumes)
+ if err != nil {
+ failures[id] = err
+ } else {
+ ok = append(ok, id)
+ }
+ }
+ return ok, failures, nil
+}
+
+// UmountRootFilesystems umounts container(s) root filesystems based on varlink inputs
+func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ for _, id := range ids {
+ err := iopodman.UnmountContainer().Call(r.Conn, id, cli.Force)
+ if err != nil {
+ failures[id] = err
+ } else {
+ ok = append(ok, id)
+ }
+ }
+ return ok, failures, nil
+}
+
// WaitOnContainers waits for all given container(s) to stop.
// interval is currently ignored.
func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) {
@@ -227,7 +274,7 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
// Logs one or more containers over a varlink connection
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
- //GetContainersLogs
+ // GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
return errors.Wrapf(err, "failed to get container logs")
@@ -292,3 +339,75 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
return 0, libpod.ErrNotImplemented
}
+
+// Ps lists containers over a varlink connection and rehydrates them into ps output
+func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) {
+ var psContainers []shared.PsContainerOutput
+ last := int64(c.Last)
+ PsOpts := iopodman.PsOpts{
+ All: c.All,
+ Filters: &c.Filter,
+ Last: &last,
+ Latest: &c.Latest,
+ NoTrunc: &c.NoTrunct,
+ Pod: &c.Pod,
+ Quiet: &c.Quiet,
+ Sort: &c.Sort,
+ Sync: &c.Sync,
+ }
+ containers, err := iopodman.Ps().Call(r.Conn, PsOpts)
+ if err != nil {
+ return nil, err
+ }
+ for _, ctr := range containers {
+ createdAt, err := time.Parse(time.RFC3339Nano, ctr.CreatedAt)
+ if err != nil {
+ return nil, err
+ }
+ exitedAt, err := time.Parse(time.RFC3339Nano, ctr.ExitedAt)
+ if err != nil {
+ return nil, err
+ }
+ startedAt, err := time.Parse(time.RFC3339Nano, ctr.StartedAt)
+ if err != nil {
+ return nil, err
+ }
+ containerSize := shared.ContainerSize{
+ RootFsSize: ctr.RootFsSize,
+ RwSize: ctr.RwSize,
+ }
+ state, err := libpod.StringToContainerStatus(ctr.State)
+ if err != nil {
+ return nil, err
+ }
+ psc := shared.PsContainerOutput{
+ ID: ctr.Id,
+ Image: ctr.Image,
+ Command: ctr.Command,
+ Created: ctr.Created,
+ Ports: ctr.Ports,
+ Names: ctr.Names,
+ IsInfra: ctr.IsInfra,
+ Status: ctr.Status,
+ State: state,
+ Pid: int(ctr.PidNum),
+ Size: &containerSize,
+ Pod: ctr.Pod,
+ CreatedAt: createdAt,
+ ExitedAt: exitedAt,
+ StartedAt: startedAt,
+ Labels: ctr.Labels,
+ PID: ctr.NsPid,
+ Cgroup: ctr.Cgroup,
+ IPC: ctr.Ipc,
+ MNT: ctr.Mnt,
+ NET: ctr.Net,
+ PIDNS: ctr.PidNs,
+ User: ctr.User,
+ UTS: ctr.Uts,
+ Mounts: ctr.Mounts,
+ }
+ psContainers = append(psContainers, psc)
+ }
+ return psContainers, nil
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 182a04044..d45bdb56d 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -310,6 +310,46 @@ func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.Healt
return r.Runtime.HealthCheck(c.InputArgs[0])
}
+// JoinOrCreateRootlessPod joins the specified pod if it is running or it creates a new user namespace
+// if the pod is stopped
+// func (r *LocalRuntime) JoinOrCreateRootlessPod(pod *Pod) (bool, int, error) {
+// if os.Geteuid() == 0 {
+// return false, 0, nil
+// }
+// opts := rootless.Opts{
+// Argument: pod.ID(),
+// }
+//
+// inspect, err := pod.Inspect()
+// if err != nil {
+// return false, 0, err
+// }
+// for _, ctr := range inspect.Containers {
+// prevCtr, err := r.LookupContainer(ctr.ID)
+// if err != nil {
+// return false, -1, err
+// }
+// s, err := prevCtr.State()
+// if err != nil {
+// return false, -1, err
+// }
+// if s != libpod.ContainerStateRunning && s != libpod.ContainerStatePaused {
+// continue
+// }
+// data, err := ioutil.ReadFile(prevCtr.Config().ConmonPidFile)
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "cannot read conmon PID file %q", prevCtr.Config().ConmonPidFile)
+// }
+// conmonPid, err := strconv.Atoi(string(data))
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "cannot parse PID %q", data)
+// }
+// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
+// }
+//
+// return rootless.BecomeRootInUserNSWithOpts(&opts)
+// }
+
// Events is a wrapper to libpod to obtain libpod/podman events
func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
var (
@@ -363,3 +403,28 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) {
return r.Runtime.GetDiff("", to)
}
+
+// func (r *LocalRuntime) joinContainerOrCreateRootlessUserNS(ctr *libpod.Container) (bool, int, error) {
+// if os.Geteuid() == 0 {
+// return false, 0, nil
+// }
+// s, err := ctr.State()
+// if err != nil {
+// return false, -1, err
+// }
+// opts := rootless.Opts{
+// Argument: ctr.ID(),
+// }
+// if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
+// data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "Container %s cannot read conmon PID file %q", ctr.ID(), ctr.Config().ConmonPidFile)
+// }
+// conmonPid, err := strconv.Atoi(string(data))
+// if err != nil {
+// return false, -1, errors.Wrapf(err, "Container %s cannot parse PID %q", ctr.ID(), data)
+// }
+// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
+// }
+// return rootless.BecomeRootInUserNSWithOpts(&opts)
+// }
diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/adapter/shortcuts/shortcuts.go
index 677d88457..3e4eff555 100644
--- a/pkg/adapter/shortcuts/shortcuts.go
+++ b/pkg/adapter/shortcuts/shortcuts.go
@@ -1,6 +1,8 @@
package shortcuts
-import "github.com/containers/libpod/libpod"
+import (
+ "github.com/containers/libpod/libpod"
+)
// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
@@ -27,28 +29,23 @@ func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
}
// GetContainersByContext gets containers whether all, latest, or a slice of names/ids
-func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) ([]*libpod.Container, error) {
- var ctrs = []*libpod.Container{}
+func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
+ var ctr *libpod.Container
+ ctrs = []*libpod.Container{}
if all {
- return runtime.GetAllContainers()
- }
-
- if latest {
- c, err := runtime.GetLatestContainer()
- if err != nil {
- return nil, err
- }
- ctrs = append(ctrs, c)
- return ctrs, nil
- }
-
- for _, c := range names {
- ctr, err := runtime.LookupContainer(c)
- if err != nil {
- return nil, err
- }
+ ctrs, err = runtime.GetAllContainers()
+ } else if latest {
+ ctr, err = runtime.GetLatestContainer()
ctrs = append(ctrs, ctr)
+ } else {
+ for _, n := range names {
+ ctr, e := runtime.LookupContainer(n)
+ if e != nil && err == nil {
+ err = e
+ }
+ ctrs = append(ctrs, ctr)
+ }
}
- return ctrs, nil
+ return
}
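
GetContainersByContext now funnels all three lookup modes through a single return path. One consequence worth noting: with explicit names it records only the first lookup error but keeps iterating, appending a nil entry for each name that failed to resolve, so callers must check err before dereferencing the slice. A usage sketch with hypothetical container names:

    ctrs, err := shortcuts.GetContainersByContext(false, false, []string{"web", "db"}, runtime)
    if err != nil {
        // at least one name did not resolve; ctrs may contain nil entries
    }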
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index ac1352dac..816a72953 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -47,6 +47,55 @@ func (i *LibpodAPI) ListContainers(call iopodman.VarlinkCall) error {
return call.ReplyListContainers(listContainers)
}
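+// Ps lists containers in the style of podman ps, honoring the filters and options given in opts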
+func (i *LibpodAPI) Ps(call iopodman.VarlinkCall, opts iopodman.PsOpts) error {
+ var (
+ containers []iopodman.PsContainer
+ )
+ maxWorkers := shared.Parallelize("ps")
+ psOpts := makePsOpts(opts)
+ filters := []string{}
+ if opts.Filters != nil {
+ filters = *opts.Filters
+ }
+ psContainerOutputs, err := shared.GetPsContainerOutput(i.Runtime, psOpts, filters, maxWorkers)
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+
+ for _, ctr := range psContainerOutputs {
+ container := iopodman.PsContainer{
+ Id: ctr.ID,
+ Image: ctr.Image,
+ Command: ctr.Command,
+ Created: ctr.Created,
+ Ports: ctr.Ports,
+ Names: ctr.Names,
+ IsInfra: ctr.IsInfra,
+ Status: ctr.Status,
+ State: ctr.State.String(),
+ PidNum: int64(ctr.Pid),
+ RootFsSize: ctr.Size.RootFsSize,
+ RwSize: ctr.Size.RwSize,
+ Pod: ctr.Pod,
+ CreatedAt: ctr.CreatedAt.Format(time.RFC3339Nano),
+ ExitedAt: ctr.ExitedAt.Format(time.RFC3339Nano),
+ StartedAt: ctr.StartedAt.Format(time.RFC3339Nano),
+ Labels: ctr.Labels,
+ NsPid: ctr.PID,
+ Cgroup: ctr.Cgroup,
+ Ipc: ctr.IPC,
+ Mnt: ctr.MNT,
+ Net: ctr.NET,
+ PidNs: ctr.PIDNS,
+ User: ctr.User,
+ Uts: ctr.UTS,
+ Mounts: ctr.Mounts,
+ }
+ containers = append(containers, container)
+ }
+ return call.ReplyPs(containers)
+}
+
// GetContainer ...
func (i *LibpodAPI) GetContainer(call iopodman.VarlinkCall, id string) error {
ctr, err := i.Runtime.LookupContainer(id)
diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go
index 3c4b9b79a..8716c963a 100644
--- a/pkg/varlinkapi/util.go
+++ b/pkg/varlinkapi/util.go
@@ -162,3 +162,36 @@ func stringPullPolicyToType(s string) buildah.PullPolicy {
}
return buildah.PullIfMissing
}
+
+func derefBool(inBool *bool) bool {
+ if inBool == nil {
+ return false
+ }
+ return *inBool
+}
+
+func derefString(in *string) string {
+ if in == nil {
+ return ""
+ }
+ return *in
+}
+
+func makePsOpts(inOpts iopodman.PsOpts) shared.PsOptions {
+ last := 0
+ if inOpts.Last != nil {
+ lastT := *inOpts.Last
+ last = int(lastT)
+ }
+ return shared.PsOptions{
+ All: inOpts.All,
+ Last: last,
+ Latest: derefBool(inOpts.Latest),
+ NoTrunc: derefBool(inOpts.NoTrunc),
+ Pod: derefBool(inOpts.Pod),
+ Size: true,
+ Sort: derefString(inOpts.Sort),
+ Namespace: true,
+ Sync: derefBool(inOpts.Sync),
+ }
+}
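
The deref helpers translate the bindings' optional pointer fields back into plain values, defaulting to false or the empty string when the client omitted them, while makePsOpts hard-wires Size and Namespace to true so the server always gathers everything a remote ps might need to format. For instance:

    var sort *string                        // client did not set ?sort
    _ = derefString(sort)                   // yields ""
    psOpts := makePsOpts(iopodman.PsOpts{All: true})
    _ = psOpts.Size && psOpts.Namespace     // true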
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index b20b3b37e..58f94f27e 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -3,7 +3,6 @@ package integration
import (
"encoding/json"
"fmt"
- "github.com/containers/libpod/pkg/rootless"
"io/ioutil"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"strings"
"testing"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/libpod/pkg/inspect"
@@ -86,7 +86,7 @@ func TestLibpod(t *testing.T) {
}
var _ = SynchronizedBeforeSuite(func() []byte {
- //Cache images
+ // Cache images
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
podman := PodmanTestCreate("/tmp")
@@ -134,18 +134,18 @@ func (p *PodmanTestIntegration) Setup() {
p.ArtifactPath = ARTIFACT_DIR
}
-//var _ = BeforeSuite(func() {
-// cwd, _ := os.Getwd()
-// INTEGRATION_ROOT = filepath.Join(cwd, "../../")
-// podman := PodmanTestCreate("/tmp")
-// podman.ArtifactPath = ARTIFACT_DIR
-// if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {
-// if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {
-// fmt.Printf("%q\n", err)
-// os.Exit(1)
-// }
-// }
-//})
+// var _ = BeforeSuite(func() {
+// cwd, _ := os.Getwd()
+// INTEGRATION_ROOT = filepath.Join(cwd, "../../")
+// podman := PodmanTestCreate("/tmp")
+// podman.ArtifactPath = ARTIFACT_DIR
+// if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {
+// if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {
+// fmt.Printf("%q\n", err)
+// os.Exit(1)
+// }
+// }
+// })
// for _, image := range CACHE_IMAGES {
// if err := podman.CreateArtifact(image); err != nil {
// fmt.Printf("%q\n", err)
@@ -172,7 +172,7 @@ func (p *PodmanTestIntegration) Setup() {
// os.Exit(1)
// }
// LockTmpDir = path
-//})
+// })
var _ = AfterSuite(func() {
sort.Sort(testResultsSortedLength{testResults})
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index 685a08340..a69c1ba9a 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -61,9 +61,12 @@ func (p *PodmanTestIntegration) PodmanPID(args []string) (*PodmanSessionIntegrat
func (p *PodmanTestIntegration) Cleanup() {
// Remove all containers
stopall := p.Podman([]string{"stop", "-a", "--timeout", "0"})
- stopall.WaitWithDefaultTimeout()
+ // stopall.WaitWithDefaultTimeout()
+ stopall.Wait(90)
+
session := p.Podman([]string{"rm", "-fa"})
session.Wait(90)
+
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
@@ -141,7 +144,7 @@ func (p *PodmanTestIntegration) CreatePod(name string) (*PodmanSessionIntegratio
return session, session.ExitCode(), session.OutputToString()
}
-//RunTopContainer runs a simple container in the background that
+// RunTopContainer runs a simple container in the background that
// runs top. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunTopContainer(name string) *PodmanSessionIntegration {
var podmanArgs = []string{"run"}
@@ -161,7 +164,7 @@ func (p *PodmanTestIntegration) RunTopContainerInPod(name, pod string) *PodmanSe
return p.Podman(podmanArgs)
}
-//RunLsContainer runs a simple container in the background that
+// RunLsContainer runs a simple container in the background that
// simply runs ls. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run"}
@@ -215,13 +218,19 @@ func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
return PodmanTestCreateUtil(tempDir, false)
}
-//MakeOptions assembles all the podman main options
+// MakeOptions assembles all the podman main options
func (p *PodmanTestIntegration) makeOptions(args []string) []string {
- podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s --tmpdir %s",
- p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager, p.TmpDir), " ")
+ var debug string
+ if _, ok := os.LookupEnv("DEBUG"); ok {
+ debug = "--log-level=debug --syslog=true "
+ }
+
+ podmanOptions := strings.Split(fmt.Sprintf("%s--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s --tmpdir %s",
+ debug, p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager, p.TmpDir), " ")
if os.Getenv("HOOK_OPTION") != "" {
podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
}
+
podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
podmanOptions = append(podmanOptions, args...)
return podmanOptions
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 589389b3b..61d581c6d 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -3,47 +3,79 @@
package integration
import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
"os"
"strconv"
+ "text/template"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
+type endpoint struct {
+ Host string
+ Port string
+}
+
+func (e *endpoint) Address() string {
+ return fmt.Sprintf("%s:%s", e.Host, e.Port)
+}
+
var _ = Describe("Podman search", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
)
+
+ var registryEndpoints = []endpoint{
+ {"localhost", "5001"},
+ {"localhost", "5002"},
+ {"localhost", "5003"},
+ {"localhost", "5004"},
+ {"localhost", "5005"},
+ {"localhost", "5006"},
+ {"localhost", "5007"},
+ {"localhost", "5008"},
+ {"localhost", "5009"},
+ }
+
const regFileContents = `
- [registries.search]
- registries = ['localhost:5000']
+[registries.search]
+registries = ['{{.Host}}:{{.Port}}']
- [registries.insecure]
- registries = ['localhost:5000']`
+[registries.insecure]
+registries = ['{{.Host}}:{{.Port}}']`
+ registryFileTmpl := template.Must(template.New("registryFile").Parse(regFileContents))
const badRegFileContents = `
- [registries.search]
- registries = ['localhost:5000']
- # empty
- [registries.insecure]
- registries = []`
+[registries.search]
+registries = ['{{.Host}}:{{.Port}}']
+# empty
+[registries.insecure]
+registries = []`
+ registryFileBadTmpl := template.Must(template.New("registryFileBad").Parse(badRegFileContents))
const regFileContents2 = `
- [registries.search]
- registries = ['localhost:5000', 'localhost:6000']
+[registries.search]
+registries = ['{{.Host}}:{{.Port}}', '{{.Host}}:6000']
+
+[registries.insecure]
+registries = ['{{.Host}}:{{.Port}}']`
+ registryFileTwoTmpl := template.Must(template.New("registryFileTwo").Parse(regFileContents2))
- [registries.insecure]
- registries = ['localhost:5000']`
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
+
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
+
podmanTest.RestoreAllArtifacts()
})
@@ -51,7 +83,6 @@ var _ = Describe("Podman search", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
-
})
It("podman search", func() {
@@ -134,11 +165,13 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
+ lock := GetPortLock(registryEndpoints[0].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
- fakereg := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ fakereg := podmanTest.Podman([]string{"run", "-d", "--name", "registry",
+ "-p", fmt.Sprintf("%s:5000", registryEndpoints[0].Port),
+ registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
fakereg.WaitWithDefaultTimeout()
Expect(fakereg.ExitCode()).To(Equal(0))
@@ -146,7 +179,8 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
- search := podmanTest.Podman([]string{"search", "localhost:5000/fake/image:andtag", "--tls-verify=false"})
+ search := podmanTest.Podman([]string{"search",
+ fmt.Sprintf("%s/fake/image:andtag", registryEndpoints[0].Address()), "--tls-verify=false"})
search.WaitWithDefaultTimeout()
// if this test succeeded, there will be no output (there is no entry named fake/image:andtag in an empty registry)
@@ -160,10 +194,12 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
+ lock := GetPortLock(registryEndpoints[3].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
- registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry3", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry3",
+ "-p", fmt.Sprintf("%s:5000", registryEndpoints[3].Port), registry,
+ "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
@@ -171,10 +207,11 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
- push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
+ image := fmt.Sprintf("%s/my-alpine", registryEndpoints[3].Address())
+ push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
- search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine", "--tls-verify=false"})
+ search := podmanTest.Podman([]string{"search", image, "--tls-verify=false"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -185,10 +222,12 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
+
+ lock := GetPortLock(registryEndpoints[4].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
- registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry4", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
+ registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[4].Port),
+ "--name", "registry4", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
@@ -196,14 +235,18 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
- push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
+ image := fmt.Sprintf("%s/my-alpine", registryEndpoints[4].Address())
+ push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
- podmanTest.setRegistriesConfigEnv([]byte(regFileContents))
+ var buffer bytes.Buffer
+ registryFileTmpl.Execute(&buffer, registryEndpoints[4])
+ podmanTest.setRegistriesConfigEnv(buffer.Bytes())
+ ioutil.WriteFile(fmt.Sprintf("%s/registry4.conf", tempdir), buffer.Bytes(), 0644)
- search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine"})
+ search := podmanTest.Podman([]string{"search", image})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -219,24 +262,29 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
+ lock := GetPortLock(registryEndpoints[5].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
- registry := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry5", registry})
+ registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[5].Port),
+ "--name", "registry5", registry})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
if !WaitContainerReady(podmanTest, "registry5", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
- push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
+
+ image := fmt.Sprintf("%s/my-alpine", registryEndpoints[5].Address())
+ push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
- // registries.conf set up
- podmanTest.setRegistriesConfigEnv([]byte(regFileContents))
+ var buffer bytes.Buffer
+ registryFileTmpl.Execute(&buffer, registryEndpoints[5])
+ podmanTest.setRegistriesConfigEnv(buffer.Bytes())
+ ioutil.WriteFile(fmt.Sprintf("%s/registry5.conf", tempdir), buffer.Bytes(), 0644)
- search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine", "--tls-verify=true"})
+ search := podmanTest.Podman([]string{"search", image, "--tls-verify=true"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -252,24 +300,29 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
+ lock := GetPortLock(registryEndpoints[6].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
- registry := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry6", registry})
+ registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[6].Port),
+ "--name", "registry6", registry})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
if !WaitContainerReady(podmanTest, "registry6", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
- push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
+
+ image := fmt.Sprintf("%s/my-alpine", registryEndpoints[6].Address())
+ push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
- // registries.conf set up
- podmanTest.setRegistriesConfigEnv([]byte(badRegFileContents))
+ var buffer bytes.Buffer
+ registryFileBadTmpl.Execute(&buffer, registryEndpoints[6])
+ podmanTest.setRegistriesConfigEnv(buffer.Bytes())
+ ioutil.WriteFile(fmt.Sprintf("%s/registry6.conf", tempdir), buffer.Bytes(), 0644)
- search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine"})
+ search := podmanTest.Podman([]string{"search", image})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -285,10 +338,14 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
- lock := GetPortLock("5000")
- defer lock.Unlock()
+ lock7 := GetPortLock(registryEndpoints[7].Port)
+ defer lock7.Unlock()
+ lock8 := GetPortLock("6000")
+ defer lock8.Unlock()
+
podmanTest.RestoreArtifact(registry)
- registryLocal := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry7", registry})
+ registryLocal := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[7].Port),
+ "--name", "registry7", registry})
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal.ExitCode()).To(Equal(0))
@@ -303,12 +360,16 @@ var _ = Describe("Podman search", func() {
if !WaitContainerReady(podmanTest, "registry8", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
+
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:6000/my-alpine"})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
- podmanTest.setRegistriesConfigEnv([]byte(regFileContents2))
+ var buffer bytes.Buffer
+ registryFileTwoTmpl.Execute(&buffer, registryEndpoints[7])
+ podmanTest.setRegistriesConfigEnv(buffer.Bytes())
+ ioutil.WriteFile(fmt.Sprintf("%s/registry8.conf", tempdir), buffer.Bytes(), 0644)
search := podmanTest.Podman([]string{"search", "my-alpine"})
search.WaitWithDefaultTimeout()
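
The endpoint struct and text/template usage introduced above let each test render a registries.conf bound to its own port instead of everyone contending for localhost:5000. A minimal sketch of that rendering flow (the output path here is arbitrary for illustration; the tests write into their per-test tempdir and point the config env var at the rendered bytes):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"text/template"
)

type endpoint struct {
	Host string
	Port string
}

func (e *endpoint) Address() string {
	return fmt.Sprintf("%s:%s", e.Host, e.Port)
}

const regFileContents = `
[registries.search]
registries = ['{{.Host}}:{{.Port}}']

[registries.insecure]
registries = ['{{.Host}}:{{.Port}}']`

func main() {
	tmpl := template.Must(template.New("registryFile").Parse(regFileContents))
	ep := endpoint{Host: "localhost", Port: "5004"}

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, ep); err != nil {
		panic(err)
	}
	// /tmp/registry4.conf is a stand-in path for this sketch only.
	if err := ioutil.WriteFile("/tmp/registry4.conf", buf.Bytes(), 0644); err != nil {
		panic(err)
	}
	fmt.Println("rendered registries.conf for", ep.Address())
}
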
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 499466f5a..6308197b8 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -311,6 +311,8 @@ func (s *PodmanSession) IsJSONOutputValid() bool {
// WaitWithDefaultTimeout waits for process finished with defaultWaitTimeout
func (s *PodmanSession) WaitWithDefaultTimeout() {
s.Wait(defaultWaitTimeout)
+ os.Stdout.Sync()
+ os.Stderr.Sync()
fmt.Println("output:", s.OutputToString())
}
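
The two Sync calls added to WaitWithDefaultTimeout flush any buffered stdout/stderr writes before the captured session output is printed, which keeps interleaved logs readable when tests run in parallel. A tiny sketch of the same idea (hypothetical helper, not the PodmanSession method itself):

package main

import (
	"fmt"
	"os"
)

// flushThenReport mirrors the WaitWithDefaultTimeout change: flush
// stdout and stderr before emitting the captured output so earlier
// buffered writes cannot land in the middle of it.
func flushThenReport(output string) {
	// Sync errors are ignored here, as in the test helper; the flush
	// is best-effort.
	os.Stdout.Sync()
	os.Stderr.Sync()
	fmt.Println("output:", output)
}

func main() {
	flushThenReport("hello from a finished session")
}
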