Diffstat
-rw-r--r--	cmd/podman/common/specgen.go	6
-rw-r--r--	cmd/podman/inspect/inspect.go	56
-rw-r--r--	pkg/api/handlers/libpod/containers_create.go	5
-rw-r--r--	pkg/domain/entities/engine_container.go	2
-rw-r--r--	pkg/domain/entities/engine_image.go	2
-rw-r--r--	pkg/domain/infra/abi/containers.go	65
-rw-r--r--	pkg/domain/infra/abi/images.go	14
-rw-r--r--	pkg/domain/infra/tunnel/containers.go	35
-rw-r--r--	pkg/domain/infra/tunnel/images.go	15
-rw-r--r--	pkg/specgen/generate/container.go	159
-rw-r--r--	pkg/specgen/generate/container_create.go	4
-rw-r--r--	pkg/specgen/generate/oci.go	2
-rw-r--r--	pkg/specgen/generate/storage.go	4
-rw-r--r--	pkg/specgen/generate/validate.go	159
-rw-r--r--	test/e2e/inspect_test.go	40
-rw-r--r--	test/e2e/logs_test.go	20
-rw-r--r--	test/e2e/run_test.go	24
17 files changed, 481 insertions, 131 deletions
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 0b44ef544..e6a524358 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -563,6 +563,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
     // we dont think these are in the spec
     // init - initbinary
     // initpath
+    s.Init = c.Init
+    s.InitPath = c.InitPath
     s.Stdin = c.Interactive
     // quiet
     // DeviceCgroupRules: c.StringSlice("device-cgroup-rule"),
@@ -625,7 +627,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
         if retries < 0 {
             return errors.Errorf("must specify restart policy retry count as a number greater than 0")
         }
-        var retriesUint uint = uint(retries)
+        var retriesUint = uint(retries)
         s.RestartRetries = &retriesUint
     default:
         return errors.Errorf("invalid restart policy: may specify retries at most once")
@@ -635,8 +637,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
     s.Remove = c.Rm
     s.StopTimeout = &c.StopTimeout

-    // TODO where should we do this?
-    // func verifyContainerResources(config *cc.CreateConfig, update bool) ([]string, error) {
     return nil
 }
diff --git a/cmd/podman/inspect/inspect.go b/cmd/podman/inspect/inspect.go
index 1ed033ec3..d80bbffdd 100644
--- a/cmd/podman/inspect/inspect.go
+++ b/cmd/podman/inspect/inspect.go
@@ -3,12 +3,14 @@ package inspect
 import (
     "context"
     "fmt"
+    "os"
     "strings"

     "github.com/containers/buildah/pkg/formats"
     "github.com/containers/libpod/cmd/podman/registry"
     "github.com/containers/libpod/pkg/domain/entities"
     "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )

@@ -78,6 +80,7 @@ func newInspector(options entities.InspectOptions) (*inspector, error) {
 func (i *inspector) inspect(namesOrIDs []string) error {
     // data - dumping place for inspection results.
     var data []interface{} //nolint
+    var errs []error
     ctx := context.Background()

     if len(namesOrIDs) == 0 {
@@ -97,24 +100,27 @@ func (i *inspector) inspect(namesOrIDs []string) error {
     // Inspect - note that AllType requires us to expensively query one-by-one.
     switch tmpType {
     case AllType:
-        all, err := i.inspectAll(ctx, namesOrIDs)
+        allData, allErrs, err := i.inspectAll(ctx, namesOrIDs)
         if err != nil {
             return err
         }
-        data = all
+        data = allData
+        errs = allErrs
     case ImageType:
-        imgData, err := i.imageEngine.Inspect(ctx, namesOrIDs, i.options)
+        imgData, allErrs, err := i.imageEngine.Inspect(ctx, namesOrIDs, i.options)
         if err != nil {
             return err
         }
+        errs = allErrs
         for i := range imgData {
             data = append(data, imgData[i])
         }
     case ContainerType:
-        ctrData, err := i.containerEngine.ContainerInspect(ctx, namesOrIDs, i.options)
+        ctrData, allErrs, err := i.containerEngine.ContainerInspect(ctx, namesOrIDs, i.options)
         if err != nil {
             return err
         }
+        errs = allErrs
         for i := range ctrData {
             data = append(data, ctrData[i])
         }
@@ -122,30 +128,54 @@ func (i *inspector) inspect(namesOrIDs []string) error {
         return errors.Errorf("invalid type %q: must be %q, %q or %q", i.options.Type, ImageType, ContainerType, AllType)
     }
+    // Always print an empty array
+    if data == nil {
+        data = []interface{}{}
+    }
+
     var out formats.Writer
     if i.options.Format == "json" || i.options.Format == "" { // "" for backwards compat
         out = formats.JSONStructArray{Output: data}
     } else {
         out = formats.StdoutTemplateArray{Output: data, Template: inspectFormat(i.options.Format)}
     }
-    return out.Out()
+    if err := out.Out(); err != nil {
+        logrus.Errorf("Error printing inspect output: %v", err)
+    }
+    if len(errs) > 0 {
+        if len(errs) > 1 {
+            for _, err := range errs[1:] {
+                fmt.Fprintf(os.Stderr, "error inspecting object: %v\n", err)
+            }
+        }
+        return errors.Errorf("error inspecting object: %v", errs[0])
+    }
+    return nil
 }

-func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]interface{}, error) {
+func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]interface{}, []error, error) {
     var data []interface{} //nolint
+    allErrs := []error{}
     for _, name := range namesOrIDs {
-        imgData, err := i.imageEngine.Inspect(ctx, []string{name}, i.options)
-        if err == nil {
-            data = append(data, imgData[0])
+        ctrData, errs, err := i.containerEngine.ContainerInspect(ctx, []string{name}, i.options)
+        if err != nil {
+            return nil, nil, err
+        }
+        if len(errs) == 0 {
+            data = append(data, ctrData[0])
             continue
         }
-        ctrData, err := i.containerEngine.ContainerInspect(ctx, []string{name}, i.options)
+        imgData, errs, err := i.imageEngine.Inspect(ctx, []string{name}, i.options)
         if err != nil {
-            return nil, err
+            return nil, nil, err
+        }
+        if len(errs) > 0 {
+            allErrs = append(allErrs, errors.Errorf("no such object: %q", name))
+            continue
         }
-        data = append(data, ctrData[0])
+        data = append(data, imgData[0])
     }
-    return data, nil
+    return data, allErrs, nil
 }

 func inspectFormat(row string) string {
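Editor's note: the CLI change above collects per-object lookup failures instead of aborting, so `podman inspect` always prints a valid (possibly empty) JSON array before reporting what could not be found. A minimal standalone sketch of that pattern, with a hypothetical lookup map standing in for the image/container engines (not the actual podman types):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// lookup stands in for the engines; keys are the only objects that "exist".
var lookup = map[string]map[string]string{
	"alpine": {"Id": "abc123", "Type": "image"},
}

func main() {
	names := []string{"alpine", "doesNotExist"}
	data := []interface{}{} // always a valid array, even if every lookup fails
	errs := []error{}       // per-object failures are collected, not fatal

	for _, name := range names {
		obj, ok := lookup[name]
		if !ok {
			errs = append(errs, fmt.Errorf("no such object: %q", name))
			continue
		}
		data = append(data, obj)
	}

	// Print the array first, then report the collected errors on stderr.
	out, _ := json.MarshalIndent(data, "", "  ")
	fmt.Println(string(out))
	for _, err := range errs {
		fmt.Fprintf(os.Stderr, "error inspecting object: %v\n", err)
	}
	if len(errs) > 0 {
		os.Exit(125) // assumption: any nonzero exit signals partial failure
	}
}

The real command prints all but the first collected error to stderr and returns the first one, but the ordering of "array first, errors after" is the same.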
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index 71f440bce..8fbff9be7 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -22,7 +22,8 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
         utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
         return
     }
-    if err := generate.CompleteSpec(r.Context(), runtime, &sg); err != nil {
+    warn, err := generate.CompleteSpec(r.Context(), runtime, &sg)
+    if err != nil {
         utils.InternalServerError(w, err)
         return
     }
@@ -31,6 +32,6 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
         utils.InternalServerError(w, err)
         return
     }
-    response := entities.ContainerCreateResponse{ID: ctr.ID()}
+    response := entities.ContainerCreateResponse{ID: ctr.ID(), Warnings: warn}
     utils.WriteJSON(w, http.StatusCreated, response)
 }
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 979df7581..837550a2e 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -24,7 +24,7 @@ type ContainerEngine interface {
     ContainerExists(ctx context.Context, nameOrID string) (*BoolReport, error)
     ContainerExport(ctx context.Context, nameOrID string, options ContainerExportOptions) error
     ContainerInit(ctx context.Context, namesOrIds []string, options ContainerInitOptions) ([]*ContainerInitReport, error)
-    ContainerInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]*ContainerInspectReport, error)
+    ContainerInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]*ContainerInspectReport, []error, error)
     ContainerKill(ctx context.Context, namesOrIds []string, options KillOptions) ([]*KillReport, error)
     ContainerList(ctx context.Context, options ContainerListOptions) ([]ListContainer, error)
     ContainerLogs(ctx context.Context, containers []string, options ContainerLogsOptions) error
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
index 60fb20b6e..7ece24c60 100644
--- a/pkg/domain/entities/engine_image.go
+++ b/pkg/domain/entities/engine_image.go
@@ -13,7 +13,7 @@ type ImageEngine interface {
     Exists(ctx context.Context, nameOrID string) (*BoolReport, error)
     History(ctx context.Context, nameOrID string, opts ImageHistoryOptions) (*ImageHistoryReport, error)
     Import(ctx context.Context, opts ImageImportOptions) (*ImageImportReport, error)
-    Inspect(ctx context.Context, namesOrIDs []string, opts InspectOptions) ([]*ImageInspectReport, error)
+    Inspect(ctx context.Context, namesOrIDs []string, opts InspectOptions) ([]*ImageInspectReport, []error, error)
     List(ctx context.Context, opts ImageListOptions) ([]*ImageSummary, error)
     Load(ctx context.Context, opts ImageLoadOptions) (*ImageLoadReport, error)
     Prune(ctx context.Context, opts ImagePruneOptions) (*ImagePruneReport, error)
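Editor's note: both engine interfaces now return three values: the reports, a slice of per-object ("soft") errors, and a single fatal error. A trimmed-down sketch of that contract with stand-in types (not the real entities package), showing how a caller treats each value:

package main

import (
	"context"
	"fmt"
)

// Report and inspecter are stand-ins for the entities types; the point is the
// three-value shape: reports, non-fatal per-object errors, and a fatal error.
type Report struct{ ID string }

type inspecter interface {
	Inspect(ctx context.Context, namesOrIDs []string) ([]*Report, []error, error)
}

type fakeEngine struct{ known map[string]bool }

func (e *fakeEngine) Inspect(ctx context.Context, namesOrIDs []string) ([]*Report, []error, error) {
	reports := []*Report{}
	errs := []error{}
	for _, n := range namesOrIDs {
		if !e.known[n] {
			errs = append(errs, fmt.Errorf("no such object %q", n)) // non-fatal, keep going
			continue
		}
		reports = append(reports, &Report{ID: n})
	}
	return reports, errs, nil
}

func main() {
	var eng inspecter = &fakeEngine{known: map[string]bool{"ctr1": true}}
	reports, errs, err := eng.Inspect(context.Background(), []string{"ctr1", "nope"})
	if err != nil {
		panic(err) // fatal: the backend itself failed
	}
	fmt.Printf("%d inspected, %d not found\n", len(reports), len(errs))
}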
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index d2c8aefdc..8e0ffc075 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -338,20 +338,51 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
     return reports, nil
 }

-func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, error) {
-    ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
-    if err != nil {
-        return nil, err
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, []error, error) {
+    if options.Latest {
+        ctr, err := ic.Libpod.GetLatestContainer()
+        if err != nil {
+            if errors.Cause(err) == define.ErrNoSuchCtr {
+                return nil, []error{errors.Wrapf(err, "no containers to inspect")}, nil
+            }
+            return nil, nil, err
+        }
+
+        inspect, err := ctr.Inspect(options.Size)
+        if err != nil {
+            return nil, nil, err
+        }
+
+        return []*entities.ContainerInspectReport{
+            {
+                InspectContainerData: inspect,
+            },
+        }, nil, nil
     }
-    reports := make([]*entities.ContainerInspectReport, 0, len(ctrs))
-    for _, c := range ctrs {
-        data, err := c.Inspect(options.Size)
+    var (
+        reports = make([]*entities.ContainerInspectReport, 0, len(namesOrIds))
+        errs    = []error{}
+    )
+    for _, name := range namesOrIds {
+        ctr, err := ic.Libpod.LookupContainer(name)
         if err != nil {
-            return nil, err
+            // ErrNoSuchCtr is non-fatal, other errors will be
+            // treated as fatal.
+            if errors.Cause(err) == define.ErrNoSuchCtr {
+                errs = append(errs, errors.Errorf("no such container %s", name))
+                continue
+            }
+            return nil, nil, err
+        }
+
+        inspect, err := ctr.Inspect(options.Size)
+        if err != nil {
+            return nil, nil, err
         }
-        reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+
+        reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: inspect})
     }
-    return reports, nil
+    return reports, errs, nil
 }

 func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
@@ -511,9 +542,14 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
 }

 func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*entities.ContainerCreateReport, error) {
-    if err := generate.CompleteSpec(ctx, ic.Libpod, s); err != nil {
+    warn, err := generate.CompleteSpec(ctx, ic.Libpod, s)
+    if err != nil {
         return nil, err
     }
+    // Print warnings
+    for _, w := range warn {
+        fmt.Fprintf(os.Stderr, "%s\n", w)
+    }
     ctr, err := generate.MakeContainer(ctx, ic.Libpod, s)
     if err != nil {
         return nil, err
@@ -773,9 +809,14 @@ func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrID string, o
 }

 func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) {
-    if err := generate.CompleteSpec(ctx, ic.Libpod, opts.Spec); err != nil {
+    warn, err := generate.CompleteSpec(ctx, ic.Libpod, opts.Spec)
+    if err != nil {
         return nil, err
     }
+    // Print warnings
+    for _, w := range warn {
+        fmt.Fprintf(os.Stderr, "%s\n", w)
+    }
     ctr, err := generate.MakeContainer(ctx, ic.Libpod, opts.Spec)
     if err != nil {
         return nil, err
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index e630d9bc8..0f9ddfec4 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -184,24 +184,28 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
     return &entities.ImagePullReport{Images: foundIDs}, nil
 }

-func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts entities.InspectOptions) ([]*entities.ImageInspectReport, error) {
+func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts entities.InspectOptions) ([]*entities.ImageInspectReport, []error, error) {
     reports := []*entities.ImageInspectReport{}
+    errs := []error{}
     for _, i := range namesOrIDs {
         img, err := ir.Libpod.ImageRuntime().NewFromLocal(i)
         if err != nil {
-            return nil, err
+            // This is probably a no such image, treat as nonfatal.
+            errs = append(errs, err)
+            continue
         }
         result, err := img.Inspect(ctx)
         if err != nil {
-            return nil, err
+            // This is more likely to be fatal.
+            return nil, nil, err
         }
         report := entities.ImageInspectReport{}
         if err := domainUtils.DeepCopy(&report, result); err != nil {
-            return nil, err
+            return nil, nil, err
         }
         reports = append(reports, &report)
     }
-    return reports, nil
+    return reports, errs, nil
 }

 func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, options entities.ImagePushOptions) error {
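Editor's note: in both local-engine implementations the classification is "lookup failed because the object does not exist" (collect and continue) versus "anything else" (fatal). A standalone sketch of that split using the standard library's errors.Is in place of pkg/errors' Cause comparison against define.ErrNoSuchCtr (names below are illustrative only):

package main

import (
	"errors"
	"fmt"
)

var errNoSuchCtr = errors.New("no such container") // stand-in for define.ErrNoSuchCtr

func lookupContainer(name string) (string, error) {
	if name != "web" {
		return "", fmt.Errorf("looking up %q: %w", name, errNoSuchCtr)
	}
	return "id-1234", nil
}

func inspectAll(names []string) ([]string, []error, error) {
	ids := []string{}
	errs := []error{}
	for _, name := range names {
		id, err := lookupContainer(name)
		if err != nil {
			if errors.Is(err, errNoSuchCtr) {
				errs = append(errs, err) // recorded, but the loop keeps going
				continue
			}
			return nil, nil, err // anything else is fatal
		}
		ids = append(ids, id)
	}
	return ids, errs, nil
}

func main() {
	ids, errs, err := inspectAll([]string{"web", "missing"})
	fmt.Println(ids, errs, err)
}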
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 8f6f5c8b7..45fbc64f8 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -185,20 +185,27 @@ func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities.
     return containers.Prune(ic.ClientCxt, options.Filters)
 }

-func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, error) {
-    ctrs, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
-    if err != nil {
-        return nil, err
-    }
-    reports := make([]*entities.ContainerInspectReport, 0, len(ctrs))
-    for _, con := range ctrs {
-        data, err := containers.Inspect(ic.ClientCxt, con.ID, &options.Size)
+func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, []error, error) {
+    var (
+        reports = make([]*entities.ContainerInspectReport, 0, len(namesOrIds))
+        errs    = []error{}
+    )
+    for _, name := range namesOrIds {
+        inspect, err := containers.Inspect(ic.ClientCxt, name, &options.Size)
         if err != nil {
-            return nil, err
+            errModel, ok := err.(entities.ErrorModel)
+            if !ok {
+                return nil, nil, err
+            }
+            if errModel.ResponseCode == 404 {
+                errs = append(errs, errors.Errorf("no such container %q", name))
+                continue
+            }
+            return nil, nil, err
         }
-        reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: data})
+        reports = append(reports, &entities.ContainerInspectReport{InspectContainerData: inspect})
     }
-    return reports, nil
+    return reports, errs, nil
 }

 func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.TopOptions) (*entities.StringSliceReport, error) {
@@ -340,6 +347,9 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG
     if err != nil {
         return nil, err
     }
+    for _, w := range response.Warnings {
+        fmt.Fprintf(os.Stderr, "%s\n", w)
+    }
     return &entities.ContainerCreateReport{Id: response.ID}, nil
 }

@@ -497,6 +507,9 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
     if err != nil {
         return nil, err
     }
+    for _, w := range con.Warnings {
+        fmt.Fprintf(os.Stderr, "%s\n", w)
+    }
     report := entities.ContainerRunReport{Id: con.ID}
     // Attach
     if !opts.Detach {
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index fc7ac0aa8..9ddc5f1a9 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -157,16 +157,25 @@ func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string
     return nil
 }

-func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts entities.InspectOptions) ([]*entities.ImageInspectReport, error) {
+func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts entities.InspectOptions) ([]*entities.ImageInspectReport, []error, error) {
     reports := []*entities.ImageInspectReport{}
+    errs := []error{}
     for _, i := range namesOrIDs {
         r, err := images.GetImage(ir.ClientCxt, i, &opts.Size)
         if err != nil {
-            return nil, err
+            errModel, ok := err.(entities.ErrorModel)
+            if !ok {
+                return nil, nil, err
+            }
+            if errModel.ResponseCode == 404 {
+                errs = append(errs, errors.Wrapf(err, "unable to inspect %q", i))
+                continue
+            }
+            return nil, nil, err
         }
         reports = append(reports, r)
     }
-    return reports, nil
+    return reports, errs, nil
 }

 func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) (*entities.ImageLoadReport, error) {
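Editor's note: over the REST tunnel there is no sentinel error to compare against, so both tunnel implementations above sniff the HTTP status instead: a 404 becomes a collected "no such object" error, everything else stays fatal. A standalone sketch of that idea with a hypothetical error type (the real code type-asserts to entities.ErrorModel):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// apiError stands in for entities.ErrorModel: an error carrying the HTTP
// status returned by the service.
type apiError struct {
	ResponseCode int
	Msg          string
}

func (e *apiError) Error() string { return e.Msg }

func inspectRemote(name string) (string, error) {
	if name != "web" {
		return "", &apiError{ResponseCode: http.StatusNotFound, Msg: "no such container"}
	}
	return `{"Id":"1234"}`, nil
}

func main() {
	reports := []string{}
	errs := []error{}
	for _, name := range []string{"web", "missing"} {
		data, err := inspectRemote(name)
		if err != nil {
			var apiErr *apiError
			// 404 means "that object does not exist" and is collected;
			// anything else (connection refused, 500, ...) would be fatal.
			if errors.As(err, &apiErr) && apiErr.ResponseCode == http.StatusNotFound {
				errs = append(errs, fmt.Errorf("no such container %q", name))
				continue
			}
			panic(err)
		}
		reports = append(reports, data)
	}
	fmt.Println(len(reports), "inspected,", len(errs), "missing")
}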
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index 3d70571d5..df27f225b 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -5,6 +5,7 @@ import (

     "github.com/containers/image/v5/manifest"
     "github.com/containers/libpod/libpod"
+    "github.com/containers/libpod/libpod/image"
     ann "github.com/containers/libpod/pkg/annotations"
     envLib "github.com/containers/libpod/pkg/env"
     "github.com/containers/libpod/pkg/signal"
@@ -13,91 +14,103 @@ import (
     "golang.org/x/sys/unix"
 )

-func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) error {
-    // If a rootfs is used, then there is no image data
-    if s.ContainerStorageConfig.Rootfs != "" {
-        return nil
-    }
-
-    newImage, err := r.ImageRuntime().NewFromLocal(s.Image)
-    if err != nil {
-        return err
-    }
-
-    _, mediaType, err := newImage.Manifest(ctx)
-    if err != nil {
-        return err
-    }
-
-    if s.HealthConfig == nil && mediaType == manifest.DockerV2Schema2MediaType {
-        s.HealthConfig, err = newImage.GetHealthCheck(ctx)
+// Fill any missing parts of the spec generator (e.g. from the image).
+// Returns a set of warnings or any fatal error that occurred.
+func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) ([]string, error) {
+    var (
+        newImage *image.Image
+        err      error
+    )
+
+    // Only add image configuration if we have an image
+    if s.Image != "" {
+        newImage, err = r.ImageRuntime().NewFromLocal(s.Image)
         if err != nil {
-            return err
+            return nil, err
         }
-    }

-    // Image stop signal
-    if s.StopSignal == nil {
-        stopSignal, err := newImage.StopSignal(ctx)
+        _, mediaType, err := newImage.Manifest(ctx)
         if err != nil {
-            return err
+            return nil, err
         }
-        if stopSignal != "" {
-            sig, err := signal.ParseSignalNameOrNumber(stopSignal)
+
+        if s.HealthConfig == nil && mediaType == manifest.DockerV2Schema2MediaType {
+            s.HealthConfig, err = newImage.GetHealthCheck(ctx)
             if err != nil {
-                return err
+                return nil, err
+            }
+        }
+
+        // Image stop signal
+        if s.StopSignal == nil {
+            stopSignal, err := newImage.StopSignal(ctx)
+            if err != nil {
+                return nil, err
+            }
+            if stopSignal != "" {
+                sig, err := signal.ParseSignalNameOrNumber(stopSignal)
+                if err != nil {
+                    return nil, err
+                }
+                s.StopSignal = &sig
             }
-            s.StopSignal = &sig
         }
     }

     rtc, err := r.GetConfig()
     if err != nil {
-        return err
+        return nil, err
     }
     // Get Default Environment
     defaultEnvs, err := envLib.ParseSlice(rtc.Containers.Env)
     if err != nil {
-        return errors.Wrap(err, "Env fields in containers.conf failed to parse")
+        return nil, errors.Wrap(err, "Env fields in containers.conf failed to parse")
     }

-    // Image envs from the image if they don't exist
-    // already, overriding the default environments
-    imageEnvs, err := newImage.Env(ctx)
-    if err != nil {
-        return err
-    }
+    var envs map[string]string

-    envs, err := envLib.ParseSlice(imageEnvs)
-    if err != nil {
-        return errors.Wrap(err, "Env fields from image failed to parse")
-    }
-    s.Env = envLib.Join(envLib.Join(defaultEnvs, envs), s.Env)
+    if newImage != nil {
+        // Image envs from the image if they don't exist
+        // already, overriding the default environments
+        imageEnvs, err := newImage.Env(ctx)
+        if err != nil {
+            return nil, err
+        }

-    labels, err := newImage.Labels(ctx)
-    if err != nil {
-        return err
+        envs, err = envLib.ParseSlice(imageEnvs)
+        if err != nil {
+            return nil, errors.Wrap(err, "Env fields from image failed to parse")
+        }
     }

-    // labels from the image that dont exist already
-    if len(labels) > 0 && s.Labels == nil {
-        s.Labels = make(map[string]string)
-    }
-    for k, v := range labels {
-        if _, exists := s.Labels[k]; !exists {
-            s.Labels[k] = v
+    s.Env = envLib.Join(envLib.Join(defaultEnvs, envs), s.Env)
+
+    // Labels and Annotations
+    annotations := make(map[string]string)
+    if newImage != nil {
+        labels, err := newImage.Labels(ctx)
+        if err != nil {
+            return nil, err
         }
-    }

-    // annotations
+        // labels from the image that dont exist already
+        if len(labels) > 0 && s.Labels == nil {
+            s.Labels = make(map[string]string)
+        }
+        for k, v := range labels {
+            if _, exists := s.Labels[k]; !exists {
+                s.Labels[k] = v
+            }
+        }

-    // Add annotations from the image
-    annotations, err := newImage.Annotations(ctx)
-    if err != nil {
-        return err
-    }
-    for k, v := range annotations {
-        annotations[k] = v
+        // Add annotations from the image
+        imgAnnotations, err := newImage.Annotations(ctx)
+        if err != nil {
+            return nil, err
+        }
+        for k, v := range imgAnnotations {
+            annotations[k] = v
+        }
     }

     // in the event this container is in a pod, and the pod has an infra container
@@ -121,40 +134,42 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
     s.Annotations = annotations

     // workdir
-    workingDir, err := newImage.WorkingDir(ctx)
-    if err != nil {
-        return err
-    }
-    if len(s.WorkDir) < 1 && len(workingDir) > 1 {
-        s.WorkDir = workingDir
+    if newImage != nil {
+        workingDir, err := newImage.WorkingDir(ctx)
+        if err != nil {
+            return nil, err
+        }
+        if len(s.WorkDir) < 1 && len(workingDir) > 1 {
+            s.WorkDir = workingDir
+        }
     }

     if len(s.SeccompProfilePath) < 1 {
         p, err := libpod.DefaultSeccompPath()
         if err != nil {
-            return err
+            return nil, err
         }
         s.SeccompProfilePath = p
     }

-    if len(s.User) == 0 {
+    if len(s.User) == 0 && newImage != nil {
         s.User, err = newImage.User(ctx)
         if err != nil {
-            return err
+            return nil, err
         }
     }
     if err := finishThrottleDevices(s); err != nil {
-        return err
+        return nil, err
     }

     // Unless already set via the CLI, check if we need to disable process
     // labels or set the defaults.
     if len(s.SelinuxOpts) == 0 {
         if err := setLabelOpts(s, r, s.PidNS, s.IpcNS); err != nil {
-            return err
+            return nil, err
         }
     }

-    return nil
+    return verifyContainerResources(s)
 }

 // finishThrottleDevices takes the temporary representation of the throttle
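Editor's note: CompleteSpec now hands non-fatal warnings back to its callers instead of failing or printing them itself; the local callers in pkg/domain/infra/abi write them to stderr, and the API handler returns them in the create response. A minimal sketch of that calling convention with a stand-in completeSpec (the spec struct and the swapLimitSupported flag are illustrative; the real code consults containers/common's sysinfo):

package main

import (
	"errors"
	"fmt"
	"os"
)

type spec struct {
	Image       string
	MemoryLimit *int64
	MemorySwap  *int64
}

// completeSpec mimics the new shape: fill in defaults, return warnings for
// anything that had to be discarded, and reserve error for real failures.
func completeSpec(s *spec, swapLimitSupported bool) ([]string, error) {
	warnings := []string{}
	if s.Image == "" {
		return nil, errors.New("an image or rootfs is required") // fatal
	}
	if s.MemoryLimit != nil && s.MemorySwap != nil && !swapLimitSupported {
		// Unsupported limits are downgraded to a warning and cleared.
		warnings = append(warnings, "kernel does not support swap limit capabilities; memory limited without swap")
		s.MemorySwap = nil
	}
	return warnings, nil
}

func main() {
	limit, swap := int64(256<<20), int64(512<<20)
	s := &spec{Image: "alpine", MemoryLimit: &limit, MemorySwap: &swap}
	warn, err := completeSpec(s, false)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(125)
	}
	for _, w := range warn {
		fmt.Fprintln(os.Stderr, w) // warnings go to stderr, creation continues
	}
	fmt.Println("spec ready; swap limit is now", s.MemorySwap)
}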
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 33075b543..869601e93 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -16,7 +16,9 @@ import (
     "github.com/sirupsen/logrus"
 )

-// MakeContainer creates a container based on the SpecGenerator
+// MakeContainer creates a container based on the SpecGenerator.
+// Returns the created, container and any warnings resulting from creating the
+// container, or an error.
 func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Container, error) {
     rtc, err := rt.GetConfig()
     if err != nil {
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index 266abd28d..1c34f622b 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -112,7 +112,7 @@ func makeCommand(ctx context.Context, s *specgen.SpecGenerator, img *image.Image
         if initPath == "" {
             return nil, errors.Errorf("no path to init binary found but container requested an init")
         }
-        finalCommand = append([]string{initPath, "--"}, finalCommand...)
+        finalCommand = append([]string{"/dev/init", "--"}, finalCommand...)
     }

     return finalCommand, nil
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 241c9adeb..0d78421a6 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -314,8 +314,8 @@ func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount,
     if !s.PidNS.IsPrivate() {
         return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
     }
-    if s.Systemd == "true" || s.Systemd == "always" {
-        return mount, fmt.Errorf("cannot use container-init binary with systemd")
+    if s.Systemd == "always" {
+        return mount, fmt.Errorf("cannot use container-init binary with systemd=always")
     }
     if _, err := os.Stat(path); os.IsNotExist(err) {
         return mount, errors.Wrap(err, "container-init binary not found on the host")
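Editor's note: taken together, the oci.go and storage.go changes mean the container-init binary chosen on the host (via --init-path or the default) is bind-mounted to /dev/init inside the container, and the user command is always prefixed with that fixed in-container path plus "--". A rough sketch of the command rewriting, with hypothetical helper names:

package main

import "fmt"

const containerInitPath = "/dev/init" // where the init binary is mounted inside the container

// prependInit mirrors the makeCommand change: regardless of which host binary
// gets mounted, PID 1 inside the container is /dev/init, followed by "--" and
// the original command.
func prependInit(useInit bool, command []string) []string {
	if !useInit {
		return command
	}
	return append([]string{containerInitPath, "--"}, command...)
}

func main() {
	fmt.Println(prependInit(false, []string{"ls", "-l"})) // [ls -l]
	fmt.Println(prependInit(true, []string{"ls", "-l"}))  // [/dev/init -- ls -l]
}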
diff --git a/pkg/specgen/generate/validate.go b/pkg/specgen/generate/validate.go
new file mode 100644
index 000000000..bb3ca9907
--- /dev/null
+++ b/pkg/specgen/generate/validate.go
@@ -0,0 +1,159 @@
+package generate
+
+import (
+    "github.com/containers/common/pkg/sysinfo"
+    "github.com/containers/libpod/pkg/cgroups"
+    "github.com/containers/libpod/pkg/specgen"
+    "github.com/pkg/errors"
+)
+
+// Verify resource limits are sanely set, removing any limits that are not
+// possible with the current cgroups config.
+func verifyContainerResources(s *specgen.SpecGenerator) ([]string, error) {
+    warnings := []string{}
+
+    cgroup2, err := cgroups.IsCgroup2UnifiedMode()
+    if err != nil || cgroup2 {
+        return warnings, err
+    }
+
+    sysInfo := sysinfo.New(true)
+
+    if s.ResourceLimits == nil {
+        return warnings, nil
+    }
+
+    // Memory checks
+    if s.ResourceLimits.Memory != nil {
+        memory := s.ResourceLimits.Memory
+        if memory.Limit != nil && !sysInfo.MemoryLimit {
+            warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
+            memory.Limit = nil
+            memory.Swap = nil
+        }
+        if memory.Limit != nil && memory.Swap != nil && !sysInfo.SwapLimit {
+            warnings = append(warnings, "Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.")
+            memory.Swap = nil
+        }
+        if memory.Limit != nil && memory.Swap != nil && *memory.Swap < *memory.Limit {
+            return warnings, errors.New("minimum memoryswap limit should be larger than memory limit, see usage")
+        }
+        if memory.Limit == nil && memory.Swap != nil {
+            return warnings, errors.New("you should always set a memory limit when using a memoryswap limit, see usage")
+        }
+        if memory.Swappiness != nil {
+            if !sysInfo.MemorySwappiness {
+                warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.")
+                memory.Swappiness = nil
+            } else {
+                if *memory.Swappiness < 0 || *memory.Swappiness > 100 {
+                    return warnings, errors.Errorf("invalid value: %v, valid memory swappiness range is 0-100", *memory.Swappiness)
+                }
+            }
+        }
+        if memory.Reservation != nil && !sysInfo.MemoryReservation {
+            warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
+            memory.Reservation = nil
+        }
+        if memory.Limit != nil && memory.Reservation != nil && *memory.Limit < *memory.Reservation {
+            return warnings, errors.New("minimum memory limit cannot be less than memory reservation limit, see usage")
+        }
+        if memory.Kernel != nil && !sysInfo.KernelMemory {
+            warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
+            memory.Kernel = nil
+        }
+        if memory.DisableOOMKiller != nil && *memory.DisableOOMKiller && !sysInfo.OomKillDisable {
+            warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
+            memory.DisableOOMKiller = nil
+        }
+    }
+
+    // Pids checks
+    if s.ResourceLimits.Pids != nil {
+        pids := s.ResourceLimits.Pids
+        // TODO: Should this be 0, or checking that ResourceLimits.Pids
+        // is set at all?
+        if pids.Limit > 0 && !sysInfo.PidsLimit {
+            warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
+            s.ResourceLimits.Pids = nil
+        }
+    }
+
+    // CPU Checks
+    if s.ResourceLimits.CPU != nil {
+        cpu := s.ResourceLimits.CPU
+        if cpu.Shares != nil && !sysInfo.CPUShares {
+            warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
+            cpu.Shares = nil
+        }
+        if cpu.Period != nil && !sysInfo.CPUCfsPeriod {
+            warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.")
+            cpu.Period = nil
+        }
+        if cpu.Period != nil && (*cpu.Period < 1000 || *cpu.Period > 1000000) {
+            return warnings, errors.New("CPU cfs period cannot be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
+        }
+        if cpu.Quota != nil && !sysInfo.CPUCfsQuota {
+            warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.")
+            cpu.Quota = nil
+        }
+        if cpu.Quota != nil && *cpu.Quota < 1000 {
+            return warnings, errors.New("CPU cfs quota cannot be less than 1ms (i.e. 1000)")
+        }
+        if (cpu.Cpus != "" || cpu.Mems != "") && !sysInfo.Cpuset {
+            warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. CPUset discarded.")
+            cpu.Cpus = ""
+            cpu.Mems = ""
+        }
+
+        cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(cpu.Cpus)
+        if err != nil {
+            return warnings, errors.Errorf("invalid value %s for cpuset cpus", cpu.Cpus)
+        }
+        if !cpusAvailable {
+            return warnings, errors.Errorf("requested CPUs are not available - requested %s, available: %s", cpu.Cpus, sysInfo.Cpus)
+        }
+
+        memsAvailable, err := sysInfo.IsCpusetMemsAvailable(cpu.Mems)
+        if err != nil {
+            return warnings, errors.Errorf("invalid value %s for cpuset mems", cpu.Mems)
+        }
+        if !memsAvailable {
+            return warnings, errors.Errorf("requested memory nodes are not available - requested %s, available: %s", cpu.Mems, sysInfo.Mems)
+        }
+    }
+
+    // Blkio checks
+    if s.ResourceLimits.BlockIO != nil {
+        blkio := s.ResourceLimits.BlockIO
+        if blkio.Weight != nil && !sysInfo.BlkioWeight {
+            warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
+            blkio.Weight = nil
+        }
+        if blkio.Weight != nil && (*blkio.Weight > 1000 || *blkio.Weight < 10) {
+            return warnings, errors.New("range of blkio weight is from 10 to 1000")
+        }
+        if len(blkio.WeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
+            warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
+            blkio.WeightDevice = nil
+        }
+        if len(blkio.ThrottleReadBpsDevice) > 0 && !sysInfo.BlkioReadBpsDevice {
+            warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded")
+            blkio.ThrottleReadBpsDevice = nil
+        }
+        if len(blkio.ThrottleWriteBpsDevice) > 0 && !sysInfo.BlkioWriteBpsDevice {
+            warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
+            blkio.ThrottleWriteBpsDevice = nil
+        }
+        if len(blkio.ThrottleReadIOPSDevice) > 0 && !sysInfo.BlkioReadIOpsDevice {
+            warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
+            blkio.ThrottleReadIOPSDevice = nil
+        }
+        if len(blkio.ThrottleWriteIOPSDevice) > 0 && !sysInfo.BlkioWriteIOpsDevice {
+            warnings = append(warnings, "Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
+            blkio.ThrottleWriteIOPSDevice = nil
+        }
+    }
+
+    return warnings, nil
+}
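Editor's note: verifyContainerResources either downgrades an unsupported limit to a warning (and clears it from the spec) or rejects the spec outright for contradictory values. A compressed stand-alone sketch of the memory portion of that decision table, with a hypothetical sysInfo struct in place of containers/common's sysinfo:

package main

import (
	"errors"
	"fmt"
)

type memoryLimits struct {
	Limit *int64
	Swap  *int64
}

type sysInfo struct {
	MemoryLimit bool // does the kernel/cgroup support memory limits?
	SwapLimit   bool // ... and swap limits?
}

func verifyMemory(mem *memoryLimits, si sysInfo) ([]string, error) {
	warnings := []string{}
	if mem == nil {
		return warnings, nil
	}
	if mem.Limit != nil && !si.MemoryLimit {
		warnings = append(warnings, "memory limit not supported; limitation discarded")
		mem.Limit, mem.Swap = nil, nil
	}
	if mem.Limit != nil && mem.Swap != nil && !si.SwapLimit {
		warnings = append(warnings, "swap limit not supported; memory limited without swap")
		mem.Swap = nil
	}
	// Contradictory values are hard errors, not warnings.
	if mem.Limit != nil && mem.Swap != nil && *mem.Swap < *mem.Limit {
		return warnings, errors.New("memory+swap limit must be larger than the memory limit")
	}
	if mem.Limit == nil && mem.Swap != nil {
		return warnings, errors.New("a memory limit is required when setting a memory+swap limit")
	}
	return warnings, nil
}

func main() {
	limit, swap := int64(256<<20), int64(512<<20)
	warn, err := verifyMemory(&memoryLimits{Limit: &limit, Swap: &swap}, sysInfo{MemoryLimit: true, SwapLimit: false})
	fmt.Println(warn, err) // the swap limit is dropped with a warning
}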
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index 62f69f1c1..2fad38a36 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -223,4 +223,44 @@ var _ = Describe("Podman inspect", func() {
         Expect(baseJSON[0].ID).To(Equal(ctrJSON[0].ID))
     })
+
+    It("podman inspect always produces a valid array", func() {
+        baseInspect := podmanTest.Podman([]string{"inspect", "doesNotExist"})
+        baseInspect.WaitWithDefaultTimeout()
+        Expect(baseInspect.ExitCode()).To(Not(Equal(0)))
+        emptyJSON := baseInspect.InspectContainerToJSON()
+        Expect(len(emptyJSON)).To(Equal(0))
+    })
+
+    It("podman inspect one container with not exist returns 1-length valid array", func() {
+        ctrName := "testCtr"
+        create := podmanTest.Podman([]string{"create", "--name", ctrName, ALPINE, "sh"})
+        create.WaitWithDefaultTimeout()
+        Expect(create.ExitCode()).To(Equal(0))
+
+        baseInspect := podmanTest.Podman([]string{"inspect", ctrName, "doesNotExist"})
+        baseInspect.WaitWithDefaultTimeout()
+        Expect(baseInspect.ExitCode()).To(Not(Equal(0)))
+        baseJSON := baseInspect.InspectContainerToJSON()
+        Expect(len(baseJSON)).To(Equal(1))
+        Expect(baseJSON[0].Name).To(Equal(ctrName))
+    })
+
+    It("podman inspect container + image with same name gives container", func() {
+        ctrName := "testcontainer"
+        create := podmanTest.PodmanNoCache([]string{"create", "--name", ctrName, ALPINE, "sh"})
+        create.WaitWithDefaultTimeout()
+        Expect(create.ExitCode()).To(Equal(0))
+
+        tag := podmanTest.PodmanNoCache([]string{"tag", ALPINE, ctrName + ":latest"})
+        tag.WaitWithDefaultTimeout()
+        Expect(tag.ExitCode()).To(Equal(0))
+
+        baseInspect := podmanTest.Podman([]string{"inspect", ctrName})
+        baseInspect.WaitWithDefaultTimeout()
+        Expect(baseInspect.ExitCode()).To(Equal(0))
+        baseJSON := baseInspect.InspectContainerToJSON()
+        Expect(len(baseJSON)).To(Equal(1))
+        Expect(baseJSON[0].Name).To(Equal(ctrName))
+    })
 })
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index f9446e0c6..5d8ce24e9 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -283,10 +283,22 @@ var _ = Describe("Podman logs", func() {
         results.WaitWithDefaultTimeout()
         Expect(results).To(Exit(0))

-        // Verify that the cleanup process worked correctly and we can recreate a container with the same name
-        logc = podmanTest.Podman([]string{"run", "--rm", "--name", containerName, "-dt", ALPINE, "true"})
-        logc.WaitWithDefaultTimeout()
-        Expect(logc).To(Exit(0))
+        // TODO: we should actually check for two podman lines,
+        // but as of 2020-06-17 there's a race condition in which
+        // 'logs -f' may not catch all output from a container
+        Expect(results.OutputToString()).To(ContainSubstring("podman"))
+
+        // Container should now be terminatING or terminatED, but we
+        // have no guarantee of which: 'logs -f' does not necessarily
+        // wait for cleanup. Run 'inspect' and accept either state.
+        inspect := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.State.Status}}", containerName})
+        inspect.WaitWithDefaultTimeout()
+        if inspect.ExitCode() == 0 {
+            Expect(inspect.OutputToString()).To(Equal("exited"))
+            // TODO: add 2-second wait loop to confirm cleanup
+        } else {
+            Expect(inspect.ErrorToString()).To(ContainSubstring("no such container"))
+        }
     })

     It("follow output stopped container", func() {
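Editor's note: the TODO in the logs test above asks for a short wait loop rather than a single inspect, because 'logs -f' can return while container cleanup is still running. A rough stdlib-only sketch of such a poll (the two-second budget and the exact podman invocation are assumptions, not part of this commit):

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForExitedOrRemoved polls `podman container inspect` until the container
// reports "exited", disappears entirely, or the deadline passes.
func waitForExitedOrRemoved(name string, timeout time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	for {
		out, err := exec.Command("podman", "container", "inspect",
			"--format", "{{.State.Status}}", name).CombinedOutput()
		status := strings.TrimSpace(string(out))
		if err != nil && strings.Contains(status, "no such container") {
			return "removed", nil // cleanup finished and removed the container
		}
		if err == nil && status == "exited" {
			return status, nil
		}
		if time.Now().After(deadline) {
			return status, fmt.Errorf("container %s still %q after %v", name, status, timeout)
		}
		time.Sleep(250 * time.Millisecond)
	}
}

func main() {
	status, err := waitForExitedOrRemoved("test1", 2*time.Second)
	fmt.Println(status, err)
}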
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 76944b3db..6dce0b48d 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -151,12 +151,36 @@ var _ = Describe("Podman run", func() {
         session := podmanTest.Podman([]string{"run", "--init", ALPINE, "ls"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
+        result := podmanTest.Podman([]string{"inspect", "-l"})
+        result.WaitWithDefaultTimeout()
+        Expect(result.ExitCode()).To(Equal(0))
+        conData := result.InspectContainerToJSON()
+        Expect(conData[0].Path).To(Equal("/dev/init"))
+        Expect(conData[0].Config.Annotations["io.podman.annotations.init"]).To(Equal("TRUE"))
     })

     It("podman run a container with --init and --init-path", func() {
         session := podmanTest.Podman([]string{"run", "--init", "--init-path", "/usr/libexec/podman/catatonit", ALPINE, "ls"})
         session.WaitWithDefaultTimeout()
         Expect(session.ExitCode()).To(Equal(0))
+        result := podmanTest.Podman([]string{"inspect", "-l"})
+        result.WaitWithDefaultTimeout()
+        Expect(result.ExitCode()).To(Equal(0))
+        conData := result.InspectContainerToJSON()
+        Expect(conData[0].Path).To(Equal("/dev/init"))
+        Expect(conData[0].Config.Annotations["io.podman.annotations.init"]).To(Equal("TRUE"))
+    })
+
+    It("podman run a container without --init", func() {
+        session := podmanTest.Podman([]string{"run", ALPINE, "ls"})
+        session.WaitWithDefaultTimeout()
+        Expect(session.ExitCode()).To(Equal(0))
+        result := podmanTest.Podman([]string{"inspect", "-l"})
+        result.WaitWithDefaultTimeout()
+        Expect(result.ExitCode()).To(Equal(0))
+        conData := result.InspectContainerToJSON()
+        Expect(conData[0].Path).To(Equal("ls"))
+        Expect(conData[0].Config.Annotations["io.podman.annotations.init"]).To(Equal("FALSE"))
     })

     It("podman run seccomp test", func() {
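Editor's note: the new run tests assert on two fields of the inspect output, the rewritten entrypoint (.Path) and the io.podman.annotations.init annotation. A small sketch of checking the same fields outside the e2e harness by shelling out to podman and decoding the inspect JSON; the field paths mirror what the tests use, but treat the exact output shape as an assumption:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// inspectedContainer models just the fields the new tests assert on.
type inspectedContainer struct {
	Path   string `json:"Path"`
	Config struct {
		Annotations map[string]string `json:"Annotations"`
	} `json:"Config"`
}

func main() {
	out, err := exec.Command("podman", "inspect", "-l").Output()
	if err != nil {
		log.Fatal(err)
	}
	var data []inspectedContainer
	if err := json.Unmarshal(out, &data); err != nil || len(data) == 0 {
		log.Fatalf("unexpected inspect output: %v", err)
	}
	fmt.Println("entrypoint:", data[0].Path) // "/dev/init" when --init was used
	fmt.Println("init annotation:", data[0].Config.Annotations["io.podman.annotations.init"])
}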