From b1bf91a22a8d5d4d676e48efa2073944baff14c1 Mon Sep 17 00:00:00 2001
From: cdoern
Date: Tue, 18 Jan 2022 15:46:11 -0500
Subject: Podman pod create --share-parent vs --share=cgroup

Separated cgroupNS sharing from setting the pod as the cgroup parent, and
added a new flag, --share-parent, which sets the pod as the cgroup parent
for all containers entering the pod.

Removed cgroup from the default kernel namespaces, since we want the same
default behavior as before, which is just the cgroup parent.

resolves #12765

Signed-off-by: cdoern
Signed-off-by: cdoern
Signed-off-by: cdoern
---
 test/e2e/pod_create_test.go | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

(limited to 'test/e2e')

diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index f0abfd80c..cd7f72ac0 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -1068,4 +1068,47 @@ ENTRYPOINT ["sleep","99999"]
 	})

+	It("podman pod create --share-parent test", func() {
+		SkipIfRootlessCgroupsV1("rootless cannot use cgroups with cgroupsv1")
+		podCreate := podmanTest.Podman([]string{"pod", "create", "--share-parent=false"})
+		podCreate.WaitWithDefaultTimeout()
+		Expect(podCreate).Should(Exit(0))
+
+		ctrCreate := podmanTest.Podman([]string{"run", "-dt", "--pod", podCreate.OutputToString(), ALPINE})
+		ctrCreate.WaitWithDefaultTimeout()
+		Expect(ctrCreate).Should(Exit(0))
+
+		inspectPod := podmanTest.Podman([]string{"pod", "inspect", podCreate.OutputToString()})
+		inspectPod.WaitWithDefaultTimeout()
+		Expect(inspectPod).Should(Exit(0))
+		data := inspectPod.InspectPodToJSON()
+
+		inspect := podmanTest.InspectContainer(ctrCreate.OutputToString())
+		Expect(data.CgroupPath).To(HaveLen(0))
+		if podmanTest.CgroupManager == "cgroupfs" || !rootless.IsRootless() {
+			Expect(inspect[0].HostConfig.CgroupParent).To(HaveLen(0))
+		} else if podmanTest.CgroupManager == "systemd" {
+			Expect(inspect[0].HostConfig.CgroupParent).To(Equal("user.slice"))
+		}
+
+		podCreate2 := podmanTest.Podman([]string{"pod", "create", "--share", "cgroup,ipc,net,uts", "--share-parent=false", "--infra-name", "cgroupCtr"})
+		podCreate2.WaitWithDefaultTimeout()
+		Expect(podCreate2).Should(Exit(0))
+
+		ctrCreate2 := podmanTest.Podman([]string{"run", "-dt", "--pod", podCreate2.OutputToString(), ALPINE})
+		ctrCreate2.WaitWithDefaultTimeout()
+		Expect(ctrCreate2).Should(Exit(0))
+
+		inspectInfra := podmanTest.InspectContainer("cgroupCtr")
+
+		inspect2 := podmanTest.InspectContainer(ctrCreate2.OutputToString())
+
+		Expect(inspect2[0].HostConfig.CgroupMode).To(ContainSubstring(inspectInfra[0].ID))
+
+		podCreate3 := podmanTest.Podman([]string{"pod", "create", "--share", "cgroup"})
+		podCreate3.WaitWithDefaultTimeout()
+		Expect(podCreate3).ShouldNot(Exit(0))
+
+	})
+
 })
--
cgit v1.2.3-54-g00ecf
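
The flag split above can also be exercised straight from the CLI. Below is a rough, hypothetical sketch, not part of the patch: it assumes a local podman binary new enough to know --share-parent and the {{.CgroupPath}} format key on pod inspect, and simply prints the pod-level cgroup path with and without the flag.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// podman shells out to the local podman binary and returns trimmed output,
// panicking on any error to keep the sketch short.
func podman(args ...string) string {
	out, err := exec.Command("podman", args...).CombinedOutput()
	if err != nil {
		panic(fmt.Sprintf("podman %v: %v\n%s", args, err, out))
	}
	return strings.TrimSpace(string(out))
}

func main() {
	// Default behaviour: the pod is still the cgroup parent for the
	// containers that join it, even though cgroupns is no longer shared.
	defPod := podman("pod", "create")
	defer podman("pod", "rm", "-f", defPod)
	fmt.Println("default CgroupPath:       ", podman("pod", "inspect", "--format", "{{.CgroupPath}}", defPod))

	// --share-parent=false: the e2e test above expects an empty CgroupPath
	// for such a pod, i.e. no pod-level cgroup parent is set up.
	flatPod := podman("pod", "create", "--share-parent=false")
	defer podman("pod", "rm", "-f", flatPod)
	fmt.Println("--share-parent=false:     ", podman("pod", "inspect", "--format", "{{.CgroupPath}}", flatPod))

	// podCreate3 in the test above also asserts that plain
	// "podman pod create --share cgroup" is rejected.
}
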
From 74cba33c787adfb512d5a6eb53f548b0432122b5 Mon Sep 17 00:00:00 2001
From: Aditya R
Date: Thu, 3 Feb 2022 18:40:31 +0530
Subject: healthcheck, libpod: Read healthcheck event output from os pipe

It seems we are ignoring output from the healthcheck session. Open a valid
pipe to the healthcheck session in order to read its output. Use a common
pipe for both `stdout/stderr` since that was the previous behaviour as well.

Signed-off-by: Aditya R
---
 libpod/healthcheck.go            | 36 +++++++++++++++++-------------------
 test/e2e/healthcheck_run_test.go | 22 ++++++++++++++++++++++
 2 files changed, 39 insertions(+), 19 deletions(-)

(limited to 'test/e2e')

diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 53bad47b4..40af9aec3 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -2,7 +2,6 @@ package libpod

 import (
 	"bufio"
-	"bytes"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -22,16 +21,6 @@ const (
 	MaxHealthCheckLogLength = 500
 )

-// hcWriteCloser allows us to use bufio as a WriteCloser
-type hcWriteCloser struct {
-	*bufio.Writer
-}
-
-// Used to add a closer to bufio
-func (hcwc hcWriteCloser) Close() error {
-	return nil
-}
-
 // HealthCheck verifies the state and validity of the healthcheck configuration
 // on the container and then executes the healthcheck
 func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
@@ -51,7 +40,6 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
 	var (
 		newCommand    []string
 		returnCode    int
-		capture       bytes.Buffer
 		inStartPeriod bool
 	)
 	hcCommand := c.HealthCheckConfig().Test
@@ -73,20 +61,30 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
 	if len(newCommand) < 1 || newCommand[0] == "" {
 		return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
 	}
-	captureBuffer := bufio.NewWriter(&capture)
-	hcw := hcWriteCloser{
-		captureBuffer,
+	rPipe, wPipe, err := os.Pipe()
+	if err != nil {
+		return define.HealthCheckInternalError, errors.Wrapf(err, "unable to create pipe for healthcheck session")
 	}
+	defer wPipe.Close()
+	defer rPipe.Close()
+
 	streams := new(define.AttachStreams)
-	streams.OutputStream = hcw
-	streams.ErrorStream = hcw
 	streams.InputStream = bufio.NewReader(os.Stdin)
-
+	streams.OutputStream = wPipe
+	streams.ErrorStream = wPipe
 	streams.AttachOutput = true
 	streams.AttachError = true
 	streams.AttachInput = true
+	stdout := []string{}
+	go func() {
+		scanner := bufio.NewScanner(rPipe)
+		for scanner.Scan() {
+			stdout = append(stdout, scanner.Text())
+		}
+	}()
+
 	logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
 	timeStart := time.Now()
 	hcResult := define.HealthCheckSuccess
@@ -119,7 +117,7 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
 		}
 	}

-	eventLog := capture.String()
+	eventLog := strings.Join(stdout, "\n")
 	if len(eventLog) > MaxHealthCheckLogLength {
 		eventLog = eventLog[:MaxHealthCheckLogLength]
 	}
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index c84488145..866edbf0e 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -54,6 +54,28 @@ var _ = Describe("Podman healthcheck run", func() {
 		Expect(hc).Should(Exit(125))
 	})

+	It("podman run healthcheck and logs should contain healthcheck output", func() {
+		session := podmanTest.Podman([]string{"run", "--name", "test-logs", "-dt", "--health-interval", "1s", "--health-cmd", "echo working", "busybox", "sleep", "3600"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		// Buy a little time to get container running
+		for i := 0; i < 5; i++ {
+			hc := podmanTest.Podman([]string{"healthcheck", "run", "test-logs"})
+			hc.WaitWithDefaultTimeout()
+			exitCode := hc.ExitCode()
+			if exitCode == 0 || i == 4 {
+				break
+			}
+			time.Sleep(1 * time.Second)
+		}
+
+		hc := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.State.Healthcheck.Log}}", "test-logs"})
+		hc.WaitWithDefaultTimeout()
+		Expect(hc).Should(Exit(0))
+		Expect(hc.OutputToString()).To(ContainSubstring("working"))
+	})
+
 	It("podman healthcheck from image's config (not container config)", func() {
 		// Regression test for #12226: a health check may be defined in
 		// the container or the container-config of an image.
--
cgit v1.2.3-54-g00ecf

From 77ca2498e9d0e71b5bd0867306775dc158554612 Mon Sep 17 00:00:00 2001
From: Matthew Heon
Date: Wed, 9 Feb 2022 15:02:55 -0500
Subject: Modify /etc/resolv.conf when connecting/disconnecting

The `podman network connect` and `podman network disconnect` commands give
containers access to different networks than the ones they were created
with; these networks can also have DNS servers associated with them. Until
now, however, we did not modify resolv.conf as network membership changed.

With this PR, `podman network connect` will add any new nameservers provided
by the new network to the container's /etc/resolv.conf, and the `podman
network disconnect` command will do the opposite, removing the network's
nameservers from `/etc/resolv.conf`.

Fixes #9603

Signed-off-by: Matthew Heon
---
 libpod/container_internal_linux.go          | 134 +++++++++++++++++++++++-----
 libpod/networking_linux.go                  |  48 +++++++++-
 test/e2e/network_connect_disconnect_test.go |  31 +++++++
 3 files changed, 190 insertions(+), 23 deletions(-)

(limited to 'test/e2e')

diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 95f1634a8..afa351c17 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -2045,19 +2045,8 @@ func (c *Container) generateResolvConf() (string, error) {
 		}
 	}

-	ipv6 := false
-	// If network status is set check for ipv6 and dns namesevers
 	netStatus := c.getNetworkStatus()
 	for _, status := range netStatus {
-		for _, netInt := range status.Interfaces {
-			for _, netAddress := range netInt.Subnets {
-				// Note: only using To16() does not work since it also returns a valid ip for ipv4
-				if netAddress.IPNet.IP.To4() == nil && netAddress.IPNet.IP.To16() != nil {
-					ipv6 = true
-				}
-			}
-		}
-
 		if status.DNSServerIPs != nil {
 			for _, nsIP := range status.DNSServerIPs {
 				networkNameServers = append(networkNameServers, nsIP.String())
@@ -2070,16 +2059,9 @@ func (c *Container) generateResolvConf() (string, error) {
 		}
 	}

-	if c.config.NetMode.IsSlirp4netns() {
-		ctrNetworkSlipOpts := []string{}
-		if c.config.NetworkOptions != nil {
-			ctrNetworkSlipOpts = append(ctrNetworkSlipOpts, c.config.NetworkOptions["slirp4netns"]...)
-		}
-		slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlipOpts)
-		if err != nil {
-			return "", err
-		}
-		ipv6 = slirpOpts.enableIPv6
+	ipv6, err := c.checkForIPv6(netStatus)
+	if err != nil {
+		return "", err
 	}

 	// Ensure that the container's /etc/resolv.conf is compatible with its
@@ -2160,6 +2142,116 @@ func (c *Container) generateResolvConf() (string, error) {
 	return destPath, nil
 }

+// Check if a container uses IPv6.
+func (c *Container) checkForIPv6(netStatus map[string]types.StatusBlock) (bool, error) {
+	for _, status := range netStatus {
+		for _, netInt := range status.Interfaces {
+			for _, netAddress := range netInt.Subnets {
+				// Note: only using To16() does not work since it also returns a valid ip for ipv4
+				if netAddress.IPNet.IP.To4() == nil && netAddress.IPNet.IP.To16() != nil {
+					return true, nil
+				}
+			}
+		}
+	}
+
+	if c.config.NetMode.IsSlirp4netns() {
+		ctrNetworkSlipOpts := []string{}
+		if c.config.NetworkOptions != nil {
+			ctrNetworkSlipOpts = append(ctrNetworkSlipOpts, c.config.NetworkOptions["slirp4netns"]...)
+		}
+		slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlipOpts)
+		if err != nil {
+			return false, err
+		}
+		return slirpOpts.enableIPv6, nil
+	}
+
+	return false, nil
+}
+
+// Add a new nameserver to the container's resolv.conf, ensuring that it is the
+// first nameserver present.
+// Usable only with running containers.
+func (c *Container) addNameserver(ips []string) error {
+	// Take no action if container is not running.
+	if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
+		return nil
+	}
+
+	// Do we have a resolv.conf at all?
+	path, ok := c.state.BindMounts["/etc/resolv.conf"]
+	if !ok {
+		return nil
+	}
+
+	// Read in full contents, parse out existing nameservers
+	contents, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+	ns := resolvconf.GetNameservers(contents)
+	options := resolvconf.GetOptions(contents)
+	search := resolvconf.GetSearchDomains(contents)
+
+	// We could verify that it doesn't already exist
+	// but extra nameservers shouldn't harm anything.
+	// Ensure we are the first entry in resolv.conf though, otherwise we
+	// might be after user-added servers.
+	ns = append(ips, ns...)
+
+	// We're rewriting the container's resolv.conf as part of this, but we
+	// hold the container lock, so there should be no risk of parallel
+	// modification.
+	if _, err := resolvconf.Build(path, ns, search, options); err != nil {
+		return errors.Wrapf(err, "error adding new nameserver to container %s resolv.conf", c.ID())
+	}
+
+	return nil
+}
+
+// Remove an entry from the existing resolv.conf of the container.
+// Usable only with running containers.
+func (c *Container) removeNameserver(ips []string) error {
+	// Take no action if container is not running.
+	if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
+		return nil
+	}
+
+	// Do we have a resolv.conf at all?
+	path, ok := c.state.BindMounts["/etc/resolv.conf"]
+	if !ok {
+		return nil
+	}
+
+	// Read in full contents, parse out existing nameservers
+	contents, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+	ns := resolvconf.GetNameservers(contents)
+	options := resolvconf.GetOptions(contents)
+	search := resolvconf.GetSearchDomains(contents)
+
+	toRemove := make(map[string]bool)
+	for _, ip := range ips {
+		toRemove[ip] = true
+	}
+
+	newNS := make([]string, 0, len(ns))
+	for _, server := range ns {
+		if !toRemove[server] {
+			newNS = append(newNS, server)
+		}
+	}
+
+	if _, err := resolvconf.Build(path, newNS, search, options); err != nil {
+		return errors.Wrapf(err, "error removing nameservers from container %s resolv.conf", c.ID())
+	}
+
+	return nil
+}
+
 // updateHosts updates the container's hosts file
 func (c *Container) updateHosts(path string) error {
 	var hosts string
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index e55e9d114..19d5c7f76 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -1170,6 +1170,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
 	}

 	// update network status if container is running
+	oldStatus, statusExist := networkStatus[netName]
 	delete(networkStatus, netName)
 	c.state.NetworkStatus = networkStatus
 	err = c.save()
@@ -1180,8 +1181,26 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
 	// Reload ports when there are still connected networks, maybe we removed the network interface with the child ip.
 	// Reloading without connected networks does not make sense, so we can skip this step.
 	if rootless.IsRootless() && len(networkStatus) > 0 {
-		return c.reloadRootlessRLKPortMapping()
+		if err := c.reloadRootlessRLKPortMapping(); err != nil {
+			return err
+		}
+	}
+
+	// Update resolv.conf if required
+	if statusExist {
+		stringIPs := make([]string, 0, len(oldStatus.DNSServerIPs))
+		for _, ip := range oldStatus.DNSServerIPs {
+			stringIPs = append(stringIPs, ip.String())
+		}
+		if len(stringIPs) == 0 {
+			return nil
+		}
+		logrus.Debugf("Removing DNS Servers %v from resolv.conf", stringIPs)
+		if err := c.removeNameserver(stringIPs); err != nil {
+			return err
+		}
 	}
+
 	return nil
 }
@@ -1263,11 +1282,36 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe
 	if err != nil {
 		return err
 	}
+
 	// The first network needs a port reload to set the correct child ip for the rootlessport process.
 	// Adding a second network does not require a port reload because the child ip is still valid.
 	if rootless.IsRootless() && len(networks) == 0 {
-		return c.reloadRootlessRLKPortMapping()
+		if err := c.reloadRootlessRLKPortMapping(); err != nil {
+			return err
+		}
 	}
+
+	ipv6, err := c.checkForIPv6(networkStatus)
+	if err != nil {
+		return err
+	}
+
+	// Update resolv.conf if required
+	stringIPs := make([]string, 0, len(results[netName].DNSServerIPs))
+	for _, ip := range results[netName].DNSServerIPs {
+		if (ip.To4() == nil) && !ipv6 {
+			continue
+		}
+		stringIPs = append(stringIPs, ip.String())
+	}
+	if len(stringIPs) == 0 {
+		return nil
+	}
+	logrus.Debugf("Adding DNS Servers %v to resolv.conf", stringIPs)
+	if err := c.addNameserver(stringIPs); err != nil {
+		return err
+	}
+
 	return nil
 }
diff --git a/test/e2e/network_connect_disconnect_test.go b/test/e2e/network_connect_disconnect_test.go
index 82b9dcd09..b200aa5d3 100644
--- a/test/e2e/network_connect_disconnect_test.go
+++ b/test/e2e/network_connect_disconnect_test.go
@@ -2,6 +2,7 @@ package integration

 import (
 	"os"
+	"strings"

 	. "github.com/containers/podman/v4/test/utils"
 	"github.com/containers/storage/pkg/stringid"
@@ -77,6 +78,11 @@ var _ = Describe("Podman network connect and disconnect", func() {
 		Expect(session).Should(Exit(0))
 		defer podmanTest.removeCNINetwork(netName)

+		gw := podmanTest.Podman([]string{"network", "inspect", netName, "--format", "{{(index .Subnets 0).Gateway}}"})
+		gw.WaitWithDefaultTimeout()
+		Expect(gw).Should(Exit(0))
+		ns := gw.OutputToString()
+
 		ctr := podmanTest.Podman([]string{"run", "-dt", "--name", "test", "--network", netName, ALPINE, "top"})
 		ctr.WaitWithDefaultTimeout()
 		Expect(ctr).Should(Exit(0))
@@ -85,6 +91,11 @@
 		exec.WaitWithDefaultTimeout()
 		Expect(exec).Should(Exit(0))

+		exec2 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
+		exec2.WaitWithDefaultTimeout()
+		Expect(exec2).Should(Exit(0))
+		Expect(strings.Contains(exec2.OutputToString(), ns)).To(BeTrue())
+
 		dis := podmanTest.Podman([]string{"network", "disconnect", netName, "test"})
 		dis.WaitWithDefaultTimeout()
 		Expect(dis).Should(Exit(0))
@@ -98,6 +109,11 @@
 		exec = podmanTest.Podman([]string{"exec", "-it", "test", "ip", "addr", "show", "eth0"})
 		exec.WaitWithDefaultTimeout()
 		Expect(exec).Should(ExitWithError())
+
+		exec3 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
+		exec3.WaitWithDefaultTimeout()
+		Expect(exec3).Should(Exit(0))
+		Expect(strings.Contains(exec3.OutputToString(), ns)).To(BeFalse())
 	})

 	It("bad network name in connect should result in error", func() {
@@ -182,6 +198,16 @@
 		Expect(session).Should(Exit(0))
 		defer podmanTest.removeCNINetwork(newNetName)

+		gw := podmanTest.Podman([]string{"network", "inspect", newNetName, "--format", "{{(index .Subnets 0).Gateway}}"})
+		gw.WaitWithDefaultTimeout()
+		Expect(gw).Should(Exit(0))
+		ns := gw.OutputToString()
+
+		exec2 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
+		exec2.WaitWithDefaultTimeout()
+		Expect(exec2).Should(Exit(0))
+		Expect(strings.Contains(exec2.OutputToString(), ns)).To(BeFalse())
+
 		ip := "10.11.100.99"
 		mac := "44:11:44:11:44:11"
 		connect := podmanTest.Podman([]string{"network", "connect", "--ip", ip, "--mac-address", mac, newNetName, "test"})
@@ -206,6 +232,11 @@
 		Expect(exec.OutputToString()).Should(ContainSubstring(ip))
 		Expect(exec.OutputToString()).Should(ContainSubstring(mac))

+		exec3 := podmanTest.Podman([]string{"exec", "-it", "test", "cat", "/etc/resolv.conf"})
+		exec3.WaitWithDefaultTimeout()
+		Expect(exec3).Should(Exit(0))
+		Expect(strings.Contains(exec3.OutputToString(), ns)).To(BeTrue())
+
 		// make sure no logrus errors are shown https://github.com/containers/podman/issues/9602
 		rm := podmanTest.Podman([]string{"rm", "--time=0", "-f", "test"})
 		rm.WaitWithDefaultTimeout()
--
cgit v1.2.3-54-g00ecf
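
For reference, the healthcheck commit above swaps the bufio/bytes.Buffer WriteCloser for an os.Pipe whose read end is drained by a goroutine and later joined into the event log. A minimal, stdlib-only sketch of that capture pattern, outside libpod (the done channel is added here only so the example can read the collected lines safely):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	rPipe, wPipe, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Drain the read end line by line, the way runHealthCheck now does.
	var lines []string
	done := make(chan struct{})
	go func() {
		defer close(done)
		scanner := bufio.NewScanner(rPipe)
		for scanner.Scan() {
			lines = append(lines, scanner.Text())
		}
	}()

	// Stand-in for the healthcheck session writing to the shared
	// stdout/stderr stream.
	fmt.Fprintln(wPipe, "working")
	fmt.Fprintln(wPipe, "still working")

	// Closing the write end lets the scanner hit EOF; waiting on done
	// makes it safe to read the slice afterwards.
	wPipe.Close()
	<-done
	rPipe.Close()

	eventLog := strings.Join(lines, "\n")
	fmt.Println(eventLog)
}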
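
The resolv.conf commit, in turn, boils down to two list operations before resolvconf.Build rewrites the file: addNameserver prepends the new network's servers so they take precedence over user-added entries, and removeNameserver filters out the servers of the network being disconnected. A stripped-down sketch of just that list handling, with hypothetical helper names and no libpod or resolvconf dependencies:

package main

import "fmt"

// prependNameservers puts the newly added servers first so they win over
// existing entries, mirroring what addNameserver does in the patch.
func prependNameservers(existing, added []string) []string {
	return append(append([]string{}, added...), existing...)
}

// removeNameservers drops the servers that belonged to the network being
// disconnected, mirroring removeNameserver in the patch.
func removeNameservers(existing, removed []string) []string {
	toRemove := make(map[string]bool, len(removed))
	for _, ip := range removed {
		toRemove[ip] = true
	}
	kept := make([]string, 0, len(existing))
	for _, ns := range existing {
		if !toRemove[ns] {
			kept = append(kept, ns)
		}
	}
	return kept
}

func main() {
	ns := []string{"10.89.0.1", "8.8.8.8"}
	ns = prependNameservers(ns, []string{"10.90.0.1"})
	fmt.Println(ns) // [10.90.0.1 10.89.0.1 8.8.8.8]
	ns = removeNameservers(ns, []string{"10.90.0.1"})
	fmt.Println(ns) // [10.89.0.1 8.8.8.8]
}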