 cmd/podman/common.go               |   2
 cmd/podman/ps.go                   |  13
 cmd/podman/shared/create.go        |  10
 cmd/podman/utils.go                |  29
 docs/podman-build.1.md             |  14
 docs/podman-ps.1.md                |   1
 libpod/container_internal.go       |  22
 libpod/container_internal_linux.go |  10
 libpod/healthcheck.go              | 145
 libpod/oci_linux.go                |  17
 libpod/runtime_ctr.go              |   5
 test/e2e/common_test.go            |  10
 test/e2e/healthcheck_run_test.go   |  94
 test/e2e/run_userns_test.go        |  15
 troubleshooting.md                 |   9
 15 files changed, 336 insertions(+), 60 deletions(-)
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 8b42ed673..771738302 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -293,7 +293,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
 	)
 	createFlags.String(
 		"healthcheck-interval", "30s",
-		"set an interval for the healthchecks",
+		"set an interval for the healthchecks (a value of disable results in no automatic timer setup)",
 	)
 	createFlags.Uint(
 		"healthcheck-retries", 3,
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index ad942da2e..27774f95d 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -423,7 +423,7 @@ func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Ru
 			return false
 		}, nil
 	case "status":
-		if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "exited", "unknown"}) {
+		if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
 			return nil, errors.Errorf("%s is not a valid status", filterValue)
 		}
 		return func(c *libpod.Container) bool {
@@ -431,6 +431,9 @@ func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Ru
 			if err != nil {
 				return false
 			}
+			if filterValue == "stopped" {
+				filterValue = "exited"
+			}
 			state := status.String()
 			if status == libpod.ContainerStateConfigured {
 				state = "created"
@@ -491,6 +494,14 @@ func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Ru
 			}
 			return false
 		}, nil
+	case "health":
+		return func(c *libpod.Container) bool {
+			hcStatus, err := c.HealthCheckStatus()
+			if err != nil {
+				return false
+			}
+			return hcStatus == filterValue
+		}, nil
 	}
 	return nil, errors.Errorf("%s is an invalid filter", filter)
 }
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index 55eb3ce83..5ce0b8865 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -868,21 +868,21 @@ func makeHealthCheckFromCli(c *cliconfig.PodmanCommand) (*manifest.Schema2Health
 	hc := manifest.Schema2HealthConfig{
 		Test: cmd,
 	}
+
+	if inInterval == "disable" {
+		inInterval = "0"
+	}
 	intervalDuration, err := time.ParseDuration(inInterval)
 	if err != nil {
 		return nil, errors.Wrapf(err, "invalid healthcheck-interval %s ", inInterval)
 	}
-	if intervalDuration < time.Duration(time.Second*1) {
-		return nil, errors.New("healthcheck-interval must be at least 1 second")
-	}
-
 	hc.Interval = intervalDuration
 	if inRetries < 1 {
 		return nil, errors.New("healthcheck-retries must be greater than 0.")
 	}
-
+	hc.Retries = int(inRetries)
 	timeoutDuration, err := time.ParseDuration(inTimeout)
 	if err != nil {
 		return nil, errors.Wrapf(err, "invalid healthcheck-timeout %s", inTimeout)
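Reviewer note: the three hunks above change user-visible CLI behavior. A minimal sketch of how they combine, assuming the flag names introduced in this patch (container names are illustrative, not part of the diff):

```console
# "disable" now parses as a 0s interval, so no systemd timer is wired up
podman run -dt --name hc-manual --healthcheck-interval=disable \
       --healthcheck-command 'CMD-SHELL ls /foo || exit 1' alpine top

# new ps filters from the hunks above: "stopped" (treated as "exited") and "health"
podman ps --all --filter status=stopped
podman ps --all --filter health=unhealthy
```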
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index 4ec0f8a13..45d081512 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -200,35 +200,6 @@ func getPodsFromContext(c *cliconfig.PodmanCommand, r *libpod.Runtime) ([]*libpo
 	return pods, lastError
 }
 
-func getVolumesFromContext(c *cliconfig.PodmanCommand, r *libpod.Runtime) ([]*libpod.Volume, error) {
-	args := c.InputArgs
-	var (
-		vols      []*libpod.Volume
-		lastError error
-		err       error
-	)
-
-	if c.Bool("all") {
-		vols, err = r.Volumes()
-		if err != nil {
-			return nil, errors.Wrapf(err, "unable to get all volumes")
-		}
-	}
-
-	for _, i := range args {
-		vol, err := r.GetVolume(i)
-		if err != nil {
-			if lastError != nil {
-				logrus.Errorf("%q", lastError)
-			}
-			lastError = errors.Wrapf(err, "unable to find volume %s", i)
-			continue
-		}
-		vols = append(vols, vol)
-	}
-	return vols, lastError
-}
-
 //printParallelOutput takes the map of parallel worker results and outputs them
 // to stdout
 func printParallelOutput(m map[string]error, errCount int) error {
diff --git a/docs/podman-build.1.md b/docs/podman-build.1.md
index 42fa9a359..ccc8bd900 100644
--- a/docs/podman-build.1.md
+++ b/docs/podman-build.1.md
@@ -288,13 +288,21 @@ process.
 
 **--pull**
 
-Pull the image if it is not present. If this flag is disabled (with
-*--pull=false*) and the image is not present, the image will not be pulled.
+When the flag is enabled, attempt to pull the latest image from the registries
+listed in registries.conf if a local image does not exist or the image is newer
+than the one in storage. Raise an error if the image is not in any listed
+registry and is not present locally.
+
+If the flag is disabled (with *--pull=false*), do not pull the image from the
+registry, use only the local version. Raise an error if the image is not
+present locally.
+
 Defaults to *true*.
 
 **--pull-always**
 
-Pull the image even if a version of the image is already present.
+Pull the image from the first registry it is found in as listed in registries.conf.
+Raise an error if not found in the registries, even if the image is present locally.
 
 **--quiet, -q**
diff --git a/docs/podman-ps.1.md b/docs/podman-ps.1.md
index 811fbbc2f..685a52bda 100644
--- a/docs/podman-ps.1.md
+++ b/docs/podman-ps.1.md
@@ -100,6 +100,7 @@ Valid filters are listed below:
 | before | [ID] or [Name] Containers created before this container |
 | since | [ID] or [Name] Containers created since this container |
 | volume | [VolumeName] or [MountpointDestination] Volume mounted in container |
+| health | [Status] healthy or unhealthy |
 
 **--help**, **-h**
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 13e660dc3..7a90bc7d4 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -833,6 +833,12 @@ func (c *Container) init(ctx context.Context) error {
 	if err := c.save(); err != nil {
 		return err
 	}
+	if c.config.HealthCheckConfig != nil {
+		if err := c.createTimer(); err != nil {
+			logrus.Error(err)
+		}
+	}
+
 	defer c.newContainerEvent(events.Init)
 	return c.completeNetworkSetup()
 }
@@ -956,6 +962,15 @@ func (c *Container) start() error {
 
 	c.state.State = ContainerStateRunning
 
+	if c.config.HealthCheckConfig != nil {
+		if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
+			logrus.Error(err)
+		}
+		if err := c.startTimer(); err != nil {
+			logrus.Error(err)
+		}
+	}
+
 	defer c.newContainerEvent(events.Start)
 
 	return c.save()
@@ -1123,6 +1138,13 @@ func (c *Container) cleanup(ctx context.Context) error {
 
 	logrus.Debugf("Cleaning up container %s", c.ID())
 
+	// Remove healthcheck unit/timer file if it exists
+	if c.config.HealthCheckConfig != nil {
+		if err := c.removeTimer(); err != nil {
+			logrus.Error(err)
+		}
+	}
+
 	// Clean up network namespace, if present
 	if err := c.cleanupNetwork(); err != nil {
 		lastError = err
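With the init/start/cleanup hooks above, every container that defines a healthcheck owns a transient systemd timer named after its full container ID. A rough way to observe the lifecycle on a systemd host (sketch only, not part of the patch):

```console
ctr=$(podman run -dt --healthcheck-command 'CMD-SHELL true' alpine top)
systemctl list-timers --all | grep "$ctr"   # <ctr-id>.timer triggers <ctr-id>.service
podman stop "$ctr"                          # cleanup() calls removeTimer()
```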
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 2a7808bdf..c6c9ceb0c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -203,7 +203,8 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
 	}
 	// Check if the spec file mounts contain the label Relabel flags z or Z.
 	// If they do, relabel the source directory and then remove the option.
-	for _, m := range g.Mounts() {
+	for i := range g.Config.Mounts {
+		m := &g.Config.Mounts[i]
 		var options []string
 		for _, o := range m.Options {
 			switch o {
@@ -219,6 +220,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
 			}
 		}
 		m.Options = options
+
+		// If we are using a user namespace, we will use an intermediate
+		// directory to bind mount volumes
+		if c.state.UserNSRoot != "" && strings.HasPrefix(m.Source, c.runtime.config.VolumePath) {
+			newSourceDir := filepath.Join(c.state.UserNSRoot, "volumes")
+			m.Source = strings.Replace(m.Source, c.runtime.config.VolumePath, newSourceDir, 1)
+		}
 	}
 
 	g.SetProcessSelinuxLabel(c.ProcessLabel())
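The rewrite above only adjusts the generated spec; the matching bind mount of VolumePath onto <UserNSRoot>/volumes happens in oci_linux.go further down, and the chown of new volumes in runtime_ctr.go. The new e2e test near the end of this diff exercises the path; as a manual smoke test (mapping ranges copied from that test):

```console
podman run --uidmap=0:1:70000 --gidmap=0:20000:70000 \
       -v my-foo-volume:/foo:Z busybox echo hello
```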
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index d2c0ea0fb..d8f56860b 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -3,13 +3,16 @@ package libpod
 import (
 	"bufio"
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"strings"
 	"time"
 
 	"github.com/containers/libpod/pkg/inspect"
+	"github.com/coreos/go-systemd/dbus"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -47,6 +50,10 @@ const (
 	HealthCheckHealthy string = "healthy"
 	// HealthCheckUnhealthy describes an unhealthy container
 	HealthCheckUnhealthy string = "unhealthy"
+	// HealthCheckStarting describes the time between when the container starts
+	// and the start-period (time allowed for the container to start and application
+	// to be running) expires.
+	HealthCheckStarting string = "starting"
 )
 
 // hcWriteCloser allows us to use bufio as a WriteCloser
@@ -68,17 +75,18 @@ func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
 	}
 	hcStatus, err := checkHealthCheckCanBeRun(container)
 	if err == nil {
-		return container.RunHealthCheck()
+		return container.runHealthCheck()
 	}
 	return hcStatus, err
 }
 
-// RunHealthCheck runs the health check as defined by the container
-func (c *Container) RunHealthCheck() (HealthCheckStatus, error) {
+// runHealthCheck runs the health check as defined by the container
+func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
 	var (
-		newCommand []string
-		returnCode int
-		capture    bytes.Buffer
+		newCommand    []string
+		returnCode    int
+		capture       bytes.Buffer
+		inStartPeriod bool
 	)
 	hcStatus, err := checkHealthCheckCanBeRun(c)
 	if err != nil {
@@ -111,12 +119,28 @@ func (c *Container) RunHealthCheck() (HealthCheckStatus, error) {
 		returnCode = 1
 	}
 	timeEnd := time.Now()
+	if c.HealthCheckConfig().StartPeriod > 0 {
+		// there is a start-period we need to honor; we add startPeriod to container start time
+		startPeriodTime := c.state.StartedTime.Add(c.HealthCheckConfig().StartPeriod)
+		if timeStart.Before(startPeriodTime) {
+			// we are still in the start period, flip the inStartPeriod bool
+			inStartPeriod = true
+			logrus.Debugf("healthcheck for %s being run in start-period", c.ID())
+		}
+	}
+
 	eventLog := capture.String()
 	if len(eventLog) > MaxHealthCheckLogLength {
 		eventLog = eventLog[:MaxHealthCheckLogLength]
 	}
+
+	if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
+		returnCode = -1
+		hcResult = HealthCheckFailure
+		hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
+	}
 	hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
-	if err := c.updateHealthCheckLog(hcl); err != nil {
+	if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
 		return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
 	}
 	return hcResult, hcErr
@@ -145,8 +169,23 @@ func newHealthCheckLog(start, end time.Time, exitCode int, log string) inspect.H
 	}
 }
 
+// updateHealthStatus updates the health status of the container
+// in the healthcheck log
+func (c *Container) updateHealthStatus(status string) error {
+	healthCheck, err := c.GetHealthCheckLog()
+	if err != nil {
+		return err
+	}
+	healthCheck.Status = status
+	newResults, err := json.Marshal(healthCheck)
+	if err != nil {
+		return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
+	}
+	return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
+}
+
 // UpdateHealthCheckLog parses the health check results and writes the log
-func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog) error {
+func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog, inStartPeriod bool) error {
 	healthCheck, err := c.GetHealthCheckLog()
 	if err != nil {
 		return err
 	}
@@ -159,11 +198,13 @@ func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog, inStartPeri
 	if len(healthCheck.Status) < 1 {
 		healthCheck.Status = HealthCheckHealthy
 	}
-	// increment failing streak
-	healthCheck.FailingStreak = healthCheck.FailingStreak + 1
-	// if failing streak > retries, then status to unhealthy
-	if int(healthCheck.FailingStreak) > c.HealthCheckConfig().Retries {
-		healthCheck.Status = HealthCheckUnhealthy
+	if !inStartPeriod {
+		// increment failing streak
+		healthCheck.FailingStreak = healthCheck.FailingStreak + 1
+		// if failing streak > retries, then status to unhealthy
+		if int(healthCheck.FailingStreak) >= c.HealthCheckConfig().Retries {
+			healthCheck.Status = HealthCheckUnhealthy
+		}
 	}
 	healthCheck.Log = append(healthCheck.Log, hcl)
@@ -199,3 +240,81 @@ func (c *Container) GetHealthCheckLog() (inspect.HealthCheckResults, error) {
 	}
 	return healthCheck, nil
 }
+
+// createTimer creates systemd timers for healthchecks of a container
+func (c *Container) createTimer() error {
+	if c.disableHealthCheckSystemd() {
+		return nil
+	}
+	podman, err := os.Executable()
+	if err != nil {
+		return errors.Wrapf(err, "failed to get path for podman for a health check timer")
+	}
+
+	var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()}
+
+	conn, err := dbus.NewSystemdConnection()
+	if err != nil {
+		return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
+	}
+	conn.Close()
+	logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
+	systemdRun := exec.Command("systemd-run", cmd...)
+	_, err = systemdRun.CombinedOutput()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// startTimer starts a systemd timer for the healthchecks
+func (c *Container) startTimer() error {
+	if c.disableHealthCheckSystemd() {
+		return nil
+	}
+	conn, err := dbus.NewSystemdConnection()
+	if err != nil {
+		return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
+	}
+	defer conn.Close()
+	_, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil)
+	return err
+}
+
+// removeTimer removes the systemd timer and unit files
+// for the container
+func (c *Container) removeTimer() error {
+	if c.disableHealthCheckSystemd() {
+		return nil
+	}
+	conn, err := dbus.NewSystemdConnection()
+	if err != nil {
+		return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
+	}
+	defer conn.Close()
+	serviceFile := fmt.Sprintf("%s.timer", c.ID())
+	_, err = conn.StopUnit(serviceFile, "fail", nil)
+	return err
+}
+
+// HealthCheckStatus returns the current state of a container with a healthcheck
+func (c *Container) HealthCheckStatus() (string, error) {
+	if !c.HasHealthCheck() {
+		return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
+	}
+	results, err := c.GetHealthCheckLog()
+	if err != nil {
+		return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
+	}
+	return results.Status, nil
+}
+
+func (c *Container) disableHealthCheckSystemd() bool {
+	if os.Getenv("DISABLE_HC_SYSTEMD") == "true" {
+		return true
+	}
+	if c.config.HealthCheckConfig.Interval == 0 {
+		return true
+	}
+	return false
+}
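createTimer verifies systemd is reachable over D-Bus but creates the unit by shelling out to systemd-run. For a container with the default 30s interval, the generated invocation is equivalent to the following (<ctr-id> stands for the full container ID; the podman path comes from os.Executable()):

```console
systemd-run --unit <ctr-id> --on-unit-inactive=30s --timer-property=AccuracySec=1s \
       /usr/bin/podman healthcheck run <ctr-id>
```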
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 2737a641e..f85c5ee62 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -106,6 +106,23 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
 		if err != nil {
 			return
 		}
+
+		if ctr.state.UserNSRoot != "" {
+			_, err := os.Stat(ctr.runtime.config.VolumePath)
+			if err != nil && !os.IsNotExist(err) {
+				return
+			}
+			if err == nil {
+				volumesTarget := filepath.Join(ctr.state.UserNSRoot, "volumes")
+				if err := idtools.MkdirAs(volumesTarget, 0700, ctr.RootUID(), ctr.RootGID()); err != nil {
+					return
+				}
+				if err = unix.Mount(ctr.runtime.config.VolumePath, volumesTarget, "none", unix.MS_BIND, ""); err != nil {
+					return
+				}
+			}
+		}
+
 		err = r.createOCIContainer(ctr, cgroupParent, restoreOptions)
 	}()
 	wg.Wait()
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 3b74a65dd..f23dc86dd 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -186,8 +186,11 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
 				return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
 			}
 			ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+			if err := os.Chown(ctr.config.Spec.Mounts[i].Source, ctr.RootUID(), ctr.RootGID()); err != nil {
+				return nil, errors.Wrapf(err, "cannot chown %q to %d:%d", ctr.config.Spec.Mounts[i].Source, ctr.RootUID(), ctr.RootGID())
+			}
 			if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
-				return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Source)
+				return nil, errors.Wrapf(err, "failed to copy content into new volume mount %q", vol.Source)
 			}
 			continue
 		}
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 54b2cbec2..b20b3b37e 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -239,7 +239,7 @@ func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration {
 			ociRuntime = "/usr/bin/runc"
 		}
 	}
-
+	os.Setenv("DISABLE_HC_SYSTEMD", "true")
 	CNIConfigDir := "/etc/cni/net.d"
 
 	p := &PodmanTestIntegration{
@@ -314,6 +314,14 @@ func (s *PodmanSessionIntegration) InspectImageJSON() []inspect.ImageData {
 	return i
 }
 
+// InspectContainer returns a container's inspect data in JSON format
+func (p *PodmanTestIntegration) InspectContainer(name string) []inspect.ContainerData {
+	cmd := []string{"inspect", name}
+	session := p.Podman(cmd)
+	session.WaitWithDefaultTimeout()
+	return session.InspectContainerToJSON()
+}
+
 func processTestResult(f GinkgoTestDescription) {
 	tr := testResult{length: f.Duration.Seconds(), name: f.TestText}
 	testResults = append(testResults, tr)
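The new tests below pin down the start-period semantics from updateHealthCheckLog: failures during the start period are logged but leave FailingStreak (and thus the status) untouched, and a single passing check flips the status to healthy. Condensed into a manual session (sketch only; flags as introduced by this patch):

```console
podman run -dt --name hc --healthcheck-start-period 2m --healthcheck-retries 2 \
       --healthcheck-command 'CMD-SHELL ls /foo || exit 1' alpine top
podman healthcheck run hc    # exits 1, yet status stays "starting" inside the 2m window
podman exec hc touch /foo
podman healthcheck run hc    # exits 0, status becomes "healthy"
```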
"\"CMD-SHELL ls /foo || exit 1\"", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(1)) + + inspect := podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("starting")) + + hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(1)) + + inspect = podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("unhealthy")) + + }) + + It("podman healthcheck good check results in healthy even in start-period", func() { + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-start-period", "2m", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"||\" \"exit\" \"1\"", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(0)) + + inspect := podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("healthy")) + }) + + It("podman healthcheck single healthy result changes failed to healthy", func() { + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"/foo\" \"||\" \"exit\" \"1\"", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(1)) + + inspect := podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("starting")) + + hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(1)) + + inspect = podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("unhealthy")) + + foo := podmanTest.Podman([]string{"exec", "hc", "touch", "/foo"}) + foo.WaitWithDefaultTimeout() + Expect(foo.ExitCode()).To(BeZero()) + + hc = podmanTest.Podman([]string{"healthcheck", "run", "hc"}) + hc.WaitWithDefaultTimeout() + Expect(hc.ExitCode()).To(Equal(0)) + + inspect = podmanTest.InspectContainer("hc") + Expect(inspect[0].State.Healthcheck.Status).To(Equal("healthy")) + }) }) diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go index c6c94d2f6..5c38a8950 100644 --- a/test/e2e/run_userns_test.go +++ b/test/e2e/run_userns_test.go @@ -69,6 +69,21 @@ var _ = Describe("Podman UserNS support", func() { Expect(ok).To(BeTrue()) }) + It("podman uidmapping and gidmapping with a volume", func() { + if os.Getenv("SKIP_USERNS") != "" { + Skip("Skip userns tests.") + } + if _, err := os.Stat("/proc/self/uid_map"); err != nil { + Skip("User namespaces not supported.") + } + + session := podmanTest.Podman([]string{"run", "--uidmap=0:1:70000", "--gidmap=0:20000:70000", "-v", "my-foo-volume:/foo:Z", "busybox", "echo", "hello"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + ok, _ := session.GrepString("hello") + Expect(ok).To(BeTrue()) + }) + It("podman uidmapping and gidmapping --net=host", func() { if os.Getenv("SKIP_USERNS") != "" { Skip("Skip userns tests.") diff --git a/troubleshooting.md b/troubleshooting.md index 882afef0c..08d79723a 100644 
diff --git a/troubleshooting.md b/troubleshooting.md
index 882afef0c..08d79723a 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -210,18 +210,17 @@ cannot find newuidmap: exec: "newuidmap": executable file not found in $PATH
 Install a version of shadow-utils that includes these executables. Note RHEL7
 and Centos 7 will not have support for this until RHEL7.7 is released.
 
-### 10) podman fails to run in user namespace because /etc/subuid is not properly populated.
+### 10) rootless setup user: invalid argument
 
 Rootless podman requires the user running it to have a range of UIDs listed in /etc/subuid and /etc/subgid.
 
 #### Symptom
 
-If you are running podman or buildah as a user, you get an error complaining about
-a missing subuid ranges in /etc/subuid.
+A user, either via --user or through the default configured for the image, is not mapped inside the namespace.
 
 ```
-podman run -ti fedora sh
-No subuid ranges found for user "johndoe" in /etc/subuid
+podman run --rm -ti --user 1000000 alpine echo hi
+Error: container create failed: container_linux.go:344: starting container process caused "setup user: invalid argument"
 ```
 
 #### Solution
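The Solution body falls outside the hunk shown above, but it amounts to giving the user subordinate ID ranges. A conventional fix with shadow-utils (the range values here are the common defaults and an assumption, not part of the patch):

```console
sudo usermod --add-subuids 100000-165535 --add-subgids 100000-165535 johndoe
grep johndoe /etc/subuid /etc/subgid
# expected: /etc/subuid:johndoe:100000:65536
#           /etc/subgid:johndoe:100000:65536
```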