From 34739f44131e9c493c4021d119c368c82d080c84 Mon Sep 17 00:00:00 2001 From: Stefan Weil Date: Sun, 24 Oct 2021 22:23:13 +0200 Subject: Replace 'an user' => 'a user' Signed-off-by: Stefan Weil --- libpod/network/cni/cni_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'libpod') diff --git a/libpod/network/cni/cni_types.go b/libpod/network/cni/cni_types.go index 91fd1c27b..35548f4f9 100644 --- a/libpod/network/cni/cni_types.go +++ b/libpod/network/cni/cni_types.go @@ -175,7 +175,7 @@ func newIPAMLocalHostRange(subnet types.IPNet, leaseRange *types.LeaseRange, gw Subnet: subnet.String(), } - // an user provided a range, we add it here + // a user provided a range, we add it here if leaseRange != nil { if leaseRange.StartIP != nil { hostRange.RangeStart = leaseRange.StartIP.String() -- cgit v1.2.3-54-g00ecf From b3eaa08c5fc8164c62052aaf37776ee1813e1b47 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Mon, 18 Oct 2021 11:54:44 -0400 Subject: Generate Kube should not print default structs If podman uses Workdir="/" or the workdir specified in the image, it should not add it to the yaml. If Podman find environment variables in the image, they should not get added to the yaml. If the container or pod do not have changes to SELinux we should not print seLinuxOpt{} If the container or pod do not change any dns options the yaml should not have a dnsOption={} If the container is not privileged it should not have privileged=false in the yaml. Fixes: https://github.com/containers/podman/issues/11995 Signed-off-by: Daniel J Walsh --- docs/source/markdown/podman-generate-kube.1.md | 56 ++-------------------- libpod/kube.go | 64 +++++++++++++++++--------- test/e2e/generate_kube_test.go | 9 +++- 3 files changed, 55 insertions(+), 74 deletions(-) (limited to 'libpod') diff --git a/docs/source/markdown/podman-generate-kube.1.md b/docs/source/markdown/podman-generate-kube.1.md index 9ae3941ec..a583afcf9 100644 --- a/docs/source/markdown/podman-generate-kube.1.md +++ b/docs/source/markdown/podman-generate-kube.1.md @@ -54,13 +54,7 @@ spec: - docker-entrypoint.sh - mysqld env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - name: HOSTNAME - - name: container - value: podman - name: GOSU_VERSION value: "1.10" - name: GPG_KEYS @@ -77,14 +71,14 @@ spec: ports: - containerPort: 3306 hostPort: 36533 - protocol: TCP resources: {} securityContext: - allowPrivilegeEscalation: true - privileged: false - readOnlyRootFilesystem: false + capabilities: + drop: + - CAP_MKNOD + - CAP_NET_RAW + - CAP_AUDIT_WRITE tty: true - workingDir: / status: {} ``` @@ -106,31 +100,18 @@ spec: containers: - command: - /bin/sh - env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - - name: container - value: podman image: docker.io/library/alpine:latest name: test-bind-mount resources: {} securityContext: - allowPrivilegeEscalation: true capabilities: drop: - CAP_MKNOD - CAP_NET_RAW - CAP_AUDIT_WRITE - privileged: false - readOnlyRootFilesystem: false - seLinuxOptions: {} volumeMounts: - mountPath: /volume name: home-user-my-data-host - workingDir: / - dnsConfig: {} restartPolicy: Never volumes: - hostPath: @@ -158,31 +139,18 @@ spec: containers: - command: - /bin/sh - env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - - name: container - value: podman image: docker.io/library/alpine:latest name: 
test-bind-mount resources: {} securityContext: - allowPrivilegeEscalation: true capabilities: drop: - CAP_MKNOD - CAP_NET_RAW - CAP_AUDIT_WRITE - privileged: false - readOnlyRootFilesystem: false - seLinuxOptions: {} volumeMounts: - mountPath: /volume name: priceless-data-pvc - workingDir: / - dnsConfig: {} restartPolicy: Never volumes: - name: priceless-data-pvc @@ -210,22 +178,9 @@ spec: - command: - python3 - /root/code/graph.py - env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - - name: HOSTNAME - - name: container - value: podman image: quay.io/baude/demoweb:latest name: practicalarchimedes resources: {} - securityContext: - allowPrivilegeEscalation: true - capabilities: {} - privileged: false - readOnlyRootFilesystem: false tty: true workingDir: /root/code status: {} @@ -242,7 +197,6 @@ spec: - name: "8050" nodePort: 31269 port: 8050 - protocol: TCP targetPort: 0 selector: app: demoweb diff --git a/libpod/kube.go b/libpod/kube.go index f5291ce60..d2ac15749 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -332,7 +332,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta InitContainers: initCtrs, Volumes: volumes, } - if dnsOptions != nil { + if dnsOptions != nil && (len(dnsOptions.Nameservers)+len(dnsOptions.Searches)+len(dnsOptions.Options) > 0) { ps.DNSConfig = dnsOptions } p := v1.Pod{ @@ -447,11 +447,6 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, [] kubeVolumes = append(kubeVolumes, volumes...) } - envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env) - if err != nil { - return kubeContainer, kubeVolumes, nil, annotations, err - } - portmappings, err := c.PortMappings() if err != nil { return kubeContainer, kubeVolumes, nil, annotations, err @@ -489,15 +484,23 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, [] kubeContainer.Command = nil } + if c.WorkingDir() != "/" && imgData.Config.WorkingDir != c.WorkingDir() { + kubeContainer.WorkingDir = c.WorkingDir() + } + if imgData.User == c.User() { kubeSec.RunAsGroup, kubeSec.RunAsUser = nil, nil } - kubeContainer.WorkingDir = c.WorkingDir() + envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env, imgData.Config.Env) + if err != nil { + return kubeContainer, kubeVolumes, nil, annotations, err + } + kubeContainer.Env = envVariables + kubeContainer.Ports = ports // This should not be applicable //container.EnvFromSource = - kubeContainer.Env = envVariables kubeContainer.SecurityContext = kubeSec kubeContainer.StdinOnce = false kubeContainer.TTY = c.config.Spec.Process.Terminal @@ -600,9 +603,14 @@ func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.C } // libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar -func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { +func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar, error) { defaultEnv := env.DefaultEnvVariables() envVars := make([]v1.EnvVar, 0, len(envs)) + imageMap := make(map[string]string, len(imageEnvs)) + for _, ie := range envs { + split := strings.SplitN(ie, "=", 2) + imageMap[split[0]] = split[1] + } for _, e := range envs { split := strings.SplitN(e, "=", 2) if len(split) != 2 { @@ -611,6 +619,9 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { if defaultEnv[split[0]] == split[1] { continue } + if imageMap[split[0]] == split[1] { + continue + } ev := 
v1.EnvVar{ Name: split[0], Value: split[1], @@ -808,33 +819,42 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { capabilities = newCaps } + sc := v1.SecurityContext{ + // RunAsNonRoot is an optional parameter; our first implementations should be root only; however + // I'm leaving this as a bread-crumb for later + //RunAsNonRoot: &nonRoot, + } + if capabilities != nil { + sc.Capabilities = capabilities + } var selinuxOpts v1.SELinuxOptions opts := strings.SplitN(c.config.Spec.Annotations[define.InspectAnnotationLabel], ":", 2) - if len(opts) == 2 { + switch len(opts) { + case 2: switch opts[0] { case "type": selinuxOpts.Type = opts[1] + sc.SELinuxOptions = &selinuxOpts case "level": selinuxOpts.Level = opts[1] + sc.SELinuxOptions = &selinuxOpts } - } - if len(opts) == 1 { + case 1: if opts[0] == "disable" { selinuxOpts.Type = "spc_t" + sc.SELinuxOptions = &selinuxOpts } } - sc := v1.SecurityContext{ - Capabilities: capabilities, - Privileged: &privileged, - SELinuxOptions: &selinuxOpts, - // RunAsNonRoot is an optional parameter; our first implementations should be root only; however - // I'm leaving this as a bread-crumb for later - //RunAsNonRoot: &nonRoot, - ReadOnlyRootFilesystem: &ro, - AllowPrivilegeEscalation: &allowPrivEscalation, + if !allowPrivEscalation { + sc.AllowPrivilegeEscalation = &allowPrivEscalation + } + if privileged { + sc.Privileged = &privileged + } + if ro { + sc.ReadOnlyRootFilesystem = &ro } - if c.User() != "" { if !c.batched { c.lock.Lock() diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index 07515fe7b..21f4ad8fa 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -67,6 +67,10 @@ var _ = Describe("Podman generate kube", func() { err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).To(BeNil()) Expect(pod.Spec.HostNetwork).To(Equal(false)) + Expect(pod.Spec.SecurityContext).To(BeNil()) + Expect(pod.Spec.DNSConfig).To(BeNil()) + Expect(pod.Spec.Containers[0].WorkingDir).To(Equal("")) + Expect(pod.Spec.Containers[0].Env).To(BeNil()) numContainers := 0 for range pod.Spec.Containers { @@ -103,6 +107,7 @@ var _ = Describe("Podman generate kube", func() { err = yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).To(BeNil()) Expect(kube.OutputToString()).To(ContainSubstring("type: spc_t")) + }) It("podman generate service kube on container with --security-opt type", func() { @@ -1079,7 +1084,7 @@ USER test1` top1.WaitWithDefaultTimeout() Expect(top1).Should(Exit(0)) - top2 := podmanTest.Podman([]string{"run", "-dt", "--name", "top2", "--pod", "pod1", "--label", "io.containers.autoupdate=registry", "--label", "io.containers.autoupdate.authfile=/some/authfile.json", ALPINE, "top"}) + top2 := podmanTest.Podman([]string{"run", "-dt", "--name", "top2", "--workdir", "/root", "--pod", "pod1", "--label", "io.containers.autoupdate=registry", "--label", "io.containers.autoupdate.authfile=/some/authfile.json", ALPINE, "top"}) top2.WaitWithDefaultTimeout() Expect(top2).Should(Exit(0)) @@ -1090,6 +1095,8 @@ USER test1` pod := new(v1.Pod) err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).To(BeNil()) + Expect(pod.Spec.Containers[0].WorkingDir).To(Equal("")) + Expect(pod.Spec.Containers[1].WorkingDir).To(Equal("/root")) for _, ctr := range []string{"top1", "top2"} { v, ok := pod.GetAnnotations()["io.containers.autoupdate/"+ctr] -- cgit v1.2.3-54-g00ecf From c3f3e6d3b8feabb3116dd5867f2e1117bdb5734f Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Wed, 20 Oct 2021 10:29:32 
-0400
Subject: Remove infra ID from DB before removing containers

If we interrupt pod removal between removing containers and removing the
whole pod, the infra ID was still in the DB, and most pod operations
would try to retrieve the infra container (and would thus fail).

Clear the infra ID from the DB just before we remove all containers to
prevent this.

Fixes #12034

[NO NEW TESTS NEEDED] This is a very narrow race and I have no idea how
to repro it.

Signed-off-by: Matthew Heon
---
 libpod/runtime_pod_linux.go | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'libpod')

diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 7571fdfff..a90ea6125 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -264,6 +264,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		}
 	}
 
+	// Clear infra container ID before we remove the infra container.
+	// There is a potential issue if we don't do that, and removal is
+	// interrupted between RemoveAllContainers() below and the pod's removal
+	// later - we end up with a reference to a nonexistent infra container.
+	p.state.InfraContainerID = ""
+	if err := p.save(); err != nil {
+		return err
+	}
+
 	// Remove all containers in the pod from the state.
 	if err := r.state.RemovePodContainers(p); err != nil {
 		// If this fails, there isn't much more we can do.
--
cgit v1.2.3-54-g00ecf


From 3b67336b608efd72a0e7a023b85b1b3dc2dc4746 Mon Sep 17 00:00:00 2001
From: cdoern
Date: Mon, 4 Oct 2021 10:18:01 -0400
Subject: Pod Rm Infra Improvements

Made changes so that if all of a pod's user-added containers have exited
and only the infra container is still running, the pod is removed.

resolves #11713

Signed-off-by: cdoern
---
 docs/source/markdown/podman-pod-rm.1.md |  2 +-
 libpod/container_internal.go            |  2 +-
 libpod/runtime_pod_linux.go             |  3 +--
 test/e2e/pod_rm_test.go                 | 17 +++++++++++++++++
 4 files changed, 20 insertions(+), 4 deletions(-)

(limited to 'libpod')

diff --git a/docs/source/markdown/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index d185385b6..ce91dab5b 100644
--- a/docs/source/markdown/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
@@ -7,7 +7,7 @@ podman\-pod\-rm - Remove one or more stopped pods and containers
 **podman pod rm** [*options*] *pod*
 
 ## DESCRIPTION
-**podman pod rm** will remove one or more stopped pods and their containers from the host. The pod name or ID can be used. The \-f option stops all containers and then removes them before removing the pod.
+**podman pod rm** will remove one or more stopped pods and their containers from the host. The pod name or ID can be used. The \-f option stops all containers and then removes them before removing the pod. If all containers added by the user are in an exited state, the pod will be removed.
## OPTIONS diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 2ca49758d..d71179017 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -2100,7 +2100,7 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID()) } - if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go index a90ea6125..3a6098de8 100644 --- a/libpod/runtime_pod_linux.go +++ b/libpod/runtime_pod_linux.go @@ -177,10 +177,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) if err != nil { return err } - numCtrs := len(ctrs) - // If the only container in the pod is the pause container, remove the pod and container unconditionally. + // If the only running container in the pod is the pause container, remove the pod and container unconditionally. pauseCtrID := p.state.InfraContainerID if numCtrs == 1 && ctrs[0].ID() == pauseCtrID { removeCtrs = true diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go index c5d91d679..ac1f322ef 100644 --- a/test/e2e/pod_rm_test.go +++ b/test/e2e/pod_rm_test.go @@ -301,4 +301,21 @@ var _ = Describe("Podman pod rm", func() { Expect(session).Should(Exit(0)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) }) + + It("podman pod rm with exited containers", func() { + _, ec, podid := podmanTest.CreatePod(nil) + Expect(ec).To(Equal(0)) + + session := podmanTest.Podman([]string{"run", "--pod", podid, ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"run", "--pod", podid, ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + result := podmanTest.Podman([]string{"pod", "rm", podid}) + result.WaitWithDefaultTimeout() + Expect(result).Should(Exit(0)) + }) }) -- cgit v1.2.3-54-g00ecf From 465e27cf10d1019297dfed643ed22d3d558a00da Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 18 Oct 2021 12:29:12 -0400 Subject: Use exponential backoff when waiting for a journal entry When looking for a cursor that matches the first journal entry for a given container, wait and try to find it using exponential backoff. 
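As a sketch (not the patch itself, which follows), the retry amounts to
the following, where fetchCursor is a hypothetical stand-in for the
journal's GetCursor call, the standard "time" package is assumed, and
the delay doubles on each failed attempt:

    // Sketch only: retry up to three times, doubling the wait each time.
    func getCursorWithBackoff(fetchCursor func() (string, error)) (string, error) {
    	var cursor string
    	var err error
    	delay := 100 * time.Millisecond
    	for attempt := 0; attempt < 3; attempt++ {
    		cursor, err = fetchCursor()
    		if err == nil {
    			return cursor, nil
    		}
    		time.Sleep(delay) // wait before the next attempt
    		delay *= 2        // exponential backoff: 100ms, 200ms, 400ms
    	}
    	return cursor, err
    }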
[NO NEW TESTS NEEDED]

Signed-off-by: Nalin Dahyabhai
---
 libpod/container_log_linux.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'libpod')

diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index ca1e11ef5..562169ce2 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -91,8 +91,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
 	var cursorError error
 	for i := 1; i <= 3; i++ {
 		cursor, cursorError = journal.GetCursor()
+		hundreds := 1
+		for j := 1; j < i; j++ {
+			hundreds *= 2
+		}
 		if cursorError != nil {
-			time.Sleep(time.Duration(i*100) * time.Millisecond)
+			time.Sleep(time.Duration(hundreds*100) * time.Millisecond)
 			continue
 		}
 		break
--
cgit v1.2.3-54-g00ecf


From 955d01f5a81ffe376dd883fec118db8256fb450b Mon Sep 17 00:00:00 2001
From: Stefan Weil
Date: Sun, 24 Oct 2021 22:44:38 +0200
Subject: [NO NEW TESTS NEEDED] Fix off-by-one index comparison (reported by LGTM)

LGTM alert: Off-by-one index comparison against length may lead to
out-of-bounds read.

Signed-off-by: Stefan Weil
---
 libpod/info.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'libpod')

diff --git a/libpod/info.go b/libpod/info.go
index 2eba4bbff..47fda0725 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -333,7 +333,7 @@ func readKernelVersion() (string, error) {
 		return "", err
 	}
 	f := bytes.Fields(buf)
-	if len(f) < 2 {
+	if len(f) < 3 {
 		return string(bytes.TrimSpace(buf)), nil
 	}
 	return string(f[2]), nil
--
cgit v1.2.3-54-g00ecf


From 7275d389b6ad3238c800e079238fb098886536de Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Tue, 26 Oct 2021 16:11:46 +0200
Subject: Document to not set K8S env vars for CNI

Setting these environment variables can cause issues with custom CNI
plugins, see #12083.

Signed-off-by: Paul Holzinger
---
 libpod/network/cni/run.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'libpod')

diff --git a/libpod/network/cni/run.go b/libpod/network/cni/run.go
index 14634262c..3f78d79a4 100644
--- a/libpod/network/cni/run.go
+++ b/libpod/network/cni/run.go
@@ -199,10 +199,8 @@ func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPort
 		IfName: opts.InterfaceName,
 		Args: [][2]string{
 			{"IgnoreUnknown", "1"},
-			// FIXME: Should we set the K8S args?
-			//{"K8S_POD_NAMESPACE", conName},
-			//{"K8S_POD_INFRA_CONTAINER_ID", conID},
-			// K8S_POD_NAME is used by dnsname to get the container name
+			// Do not set the K8S env vars, see https://github.com/containers/podman/issues/12083.
+			// Only K8S_POD_NAME is used by dnsname to get the container name.
 			{"K8S_POD_NAME", conName},
 		},
 		CapabilityArgs: map[string]interface{}{},
--
cgit v1.2.3-54-g00ecf


From 47afa6d96218605f9ca5b78c5a420f38e4c84cf9 Mon Sep 17 00:00:00 2001
From: Hironori Shiina
Date: Fri, 22 Oct 2021 21:04:48 -0400
Subject: Fix a few problems in 'podman logs --tail' with journald driver

The following problems regarding `logs --tail` with the journald log
driver are fixed:
- One more line than a specified value is displayed.
- '--tail 0' displays all lines while the other log drivers display
  nothing.
- Partial lines are not considered.
- If the journald events backend is used and a container has exited,
  nothing is displayed.

Integration tests that should have detected the bugs are also fixed.
Without this fix, the tests are executed three times with the json-file
log driver.
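Conceptually, the tail fix below counts complete lines backwards from
the end of the queued output, treating a run of partial fragments as
part of the complete line that follows it. A simplified sketch of that
counting, assuming the logs.LogLine type with its Partial() method as
used in the diff:

    // tailStart returns the index of the first queued entry to flush so
    // that exactly `tail` complete lines (plus their leading partial
    // fragments) are emitted; for tail == 0 nothing is emitted.
    func tailStart(queue []*logs.LogLine, tail int64) int64 {
    	start := int64(len(queue))
    	var complete int64
    	for start > 0 && complete < tail {
    		start--
    		// skip backwards over partial fragments; they belong to
    		// the complete line that follows them
    		for start > 0 && queue[start].Partial() {
    			start--
    		}
    		complete++
    	}
    	return start
    }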
Signed-off-by: Hironori Shiina --- libpod/container_log_linux.go | 33 ++++++++++++++------- test/e2e/logs_test.go | 69 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 11 deletions(-) (limited to 'libpod') diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go index 562169ce2..4029d0af7 100644 --- a/libpod/container_log_linux.go +++ b/libpod/container_log_linux.go @@ -121,7 +121,24 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption }() tailQueue := []*logs.LogLine{} // needed for options.Tail - doTail := options.Tail > 0 + doTail := options.Tail >= 0 + doTailFunc := func() { + // Flush *once* we hit the end of the journal. + startIndex := int64(len(tailQueue)) + outputLines := int64(0) + for startIndex > 0 && outputLines < options.Tail { + startIndex-- + for startIndex > 0 && tailQueue[startIndex].Partial() { + startIndex-- + } + outputLines++ + } + for i := startIndex; i < int64(len(tailQueue)); i++ { + logChannel <- tailQueue[i] + } + tailQueue = nil + doTail = false + } lastReadCursor := "" for { select { @@ -152,16 +169,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption // Hit the end of the journal (so far?). if cursor == lastReadCursor { if doTail { - // Flush *once* we hit the end of the journal. - startIndex := int64(len(tailQueue)-1) - options.Tail - if startIndex < 0 { - startIndex = 0 - } - for i := startIndex; i < int64(len(tailQueue)); i++ { - logChannel <- tailQueue[i] - } - tailQueue = nil - doTail = false + doTailFunc() } // Unless we follow, quit. if !options.Follow { @@ -194,6 +202,9 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption return } if status == events.Exited { + if doTail { + doTailFunc() + } return } continue diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go index 71d30f063..16e48da19 100644 --- a/test/e2e/logs_test.go +++ b/test/e2e/logs_test.go @@ -13,6 +13,19 @@ import ( . "github.com/onsi/gomega/gexec" ) +func isEventBackendJournald(podmanTest *PodmanTestIntegration) bool { + if !podmanTest.RemoteTest { + // If not remote test, '--events-backend' is set to 'file' or 'none' + return false + } + info := podmanTest.Podman([]string{"info", "--format", "{{.Host.EventLogger}}"}) + info.WaitWithDefaultTimeout() + if info.OutputToString() == "journald" { + return true + } + return false +} + var _ = Describe("Podman logs", func() { var ( tempdir string @@ -38,8 +51,18 @@ var _ = Describe("Podman logs", func() { }) for _, log := range []string{"k8s-file", "journald", "json-file"} { + // This is important to move the 'log' var to the correct scope under Ginkgo flow. 
+ log := log + + skipIfJournaldInContainer := func() { + if log == "journald" { + SkipIfInContainer("journalctl inside a container doesn't work correctly") + } + } It("all lines: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -53,6 +76,8 @@ var _ = Describe("Podman logs", func() { }) It("tail two lines: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -65,6 +90,8 @@ var _ = Describe("Podman logs", func() { }) It("tail zero lines: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -77,6 +104,8 @@ var _ = Describe("Podman logs", func() { }) It("tail 99 lines: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -89,6 +118,8 @@ var _ = Describe("Podman logs", func() { }) It("tail 800 lines: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -101,6 +132,8 @@ var _ = Describe("Podman logs", func() { }) It("tail 2 lines with timestamps: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -113,6 +146,8 @@ var _ = Describe("Podman logs", func() { }) It("since time 2017-08-07: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -125,6 +160,8 @@ var _ = Describe("Podman logs", func() { }) It("since duration 10m: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -137,6 +174,8 @@ var _ = Describe("Podman logs", func() { }) It("until duration 10m: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -149,6 +188,7 @@ var _ = Describe("Podman logs", func() { }) It("until time NOW: "+log, func() { + skipIfJournaldInContainer() logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() @@ -165,13 +205,17 @@ var _ = Describe("Podman logs", func() { }) It("latest and container name should fail: "+log, func() { + skipIfJournaldInContainer() + results := podmanTest.Podman([]string{"logs", "-l", "foobar"}) 
results.WaitWithDefaultTimeout() Expect(results).To(ExitWithError()) }) It("two containers showing short container IDs: "+log, func() { + skipIfJournaldInContainer() SkipIfRemote("FIXME: podman-remote logs does not support showing two containers at the same time") + log1 := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) log1.WaitWithDefaultTimeout() Expect(log1).Should(Exit(0)) @@ -192,6 +236,8 @@ var _ = Describe("Podman logs", func() { }) It("podman logs on a created container should result in 0 exit code: "+log, func() { + skipIfJournaldInContainer() + session := podmanTest.Podman([]string{"create", "--log-driver", log, "-t", "--name", "log", ALPINE}) session.WaitWithDefaultTimeout() Expect(session).To(Exit(0)) @@ -202,6 +248,8 @@ var _ = Describe("Podman logs", func() { }) It("streaming output: "+log, func() { + skipIfJournaldInContainer() + containerName := "logs-f" logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", containerName, "-dt", ALPINE, "sh", "-c", "echo podman-1; sleep 1; echo podman-2"}) @@ -210,6 +258,14 @@ var _ = Describe("Podman logs", func() { results := podmanTest.Podman([]string{"logs", "-f", containerName}) results.WaitWithDefaultTimeout() + + if log == "journald" && !isEventBackendJournald(podmanTest) { + // --follow + journald log-driver is only supported with journald events-backend(PR #10431) + Expect(results).To(Exit(125)) + Expect(results.ErrorToString()).To(ContainSubstring("using --follow with the journald --log-driver but without the journald --events-backend")) + return + } + Expect(results).To(Exit(0)) Expect(results.OutputToString()).To(ContainSubstring("podman-1")) @@ -233,6 +289,8 @@ var _ = Describe("Podman logs", func() { }) It("follow output stopped container: "+log, func() { + skipIfJournaldInContainer() + containerName := "logs-f" logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", containerName, "-d", ALPINE, "true"}) @@ -241,10 +299,17 @@ var _ = Describe("Podman logs", func() { results := podmanTest.Podman([]string{"logs", "-f", containerName}) results.WaitWithDefaultTimeout() + if log == "journald" && !isEventBackendJournald(podmanTest) { + // --follow + journald log-driver is only supported with journald events-backend(PR #10431) + Expect(results).To(Exit(125)) + return + } Expect(results).To(Exit(0)) }) It("using container with container log-size: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--log-opt=max-size=10k", "-d", ALPINE, "sh", "-c", "echo podman podman podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -266,6 +331,8 @@ var _ = Describe("Podman logs", func() { }) It("Make sure logs match expected length: "+log, func() { + skipIfJournaldInContainer() + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-t", "--name", "test", ALPINE, "sh", "-c", "echo 1; echo 2"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) @@ -284,6 +351,8 @@ var _ = Describe("Podman logs", func() { }) It("podman logs test stdout and stderr: "+log, func() { + skipIfJournaldInContainer() + cname := "log-test" logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", cname, ALPINE, "sh", "-c", "echo stdout; echo stderr >&2"}) logc.WaitWithDefaultTimeout() -- cgit v1.2.3-54-g00ecf From a67bf0f9213e38e74681134a9265c88121960808 Mon Sep 17 00:00:00 2001 From: Paul Holzinger Date: Tue, 26 Oct 2021 15:32:01 +0200 Subject: Slirp4netns 
with ipv6 set net.ipv6.conf.default.accept_dad=0

Duplicate Address Detection slows the ipv6 setup down for 1-2 seconds.
Since slirp4netns runs in its own namespace and is not directly routed,
we can skip this to make the ipv6 address immediately available. We
change the default to make sure the slirp tap interface gets the correct
value assigned so DAD is disabled for it. Also make sure to change this
value back to the original after slirp4netns is ready in case users rely
on this sysctl.

Fixes #11062

Signed-off-by: Paul Holzinger
---
 libpod/networking_slirp4netns.go | 39 +++++++++++++++++++++++++++++++++++++++
 test/e2e/run_networking_test.go  | 20 ++++++++++++++++++++
 2 files changed, 59 insertions(+)

(limited to 'libpod')

diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index c06d215e1..e24b54032 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -16,6 +16,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/containernetworking/plugins/pkg/ns"
 	"github.com/containers/podman/v3/pkg/errorhandling"
 	"github.com/containers/podman/v3/pkg/rootless"
 	"github.com/containers/podman/v3/pkg/rootlessport"
@@ -58,6 +59,8 @@ type slirp4netnsNetworkOptions struct {
 	outboundAddr6 string
 }
 
+const ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/default/accept_dad"
+
 func checkSlirpFlags(path string) (*slirpFeatures, error) {
 	cmd := exec.Command(path, "--help")
 	out, err := cmd.CombinedOutput()
@@ -297,6 +300,39 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
 	}
 	cmd.Stdout = logFile
 	cmd.Stderr = logFile
+
+	var slirpReadyChan (chan struct{})
+
+	if netOptions.enableIPv6 {
+		slirpReadyChan = make(chan struct{})
+		defer close(slirpReadyChan)
+		go func() {
+			err := ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error {
+				// Duplicate Address Detection slows the ipv6 setup down for 1-2 seconds.
+				// Since slirp4netns is run it is own namespace and not directly routed
+				// we can skip this to make the ipv6 address immediately available.
+				// We change the default to make sure the slirp tap interface gets the
+				// correct value assigned so DAD is disabled for it
+				// Also make sure to change this value back to the original after slirp4netns
+				// is ready in case users rely on this sysctl.
+				orgValue, err := ioutil.ReadFile(ipv6ConfDefaultAcceptDadSysctl)
+				if err != nil {
+					return err
+				}
+				err = ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, []byte("0"), 0644)
+				if err != nil {
+					return err
+				}
+				// wait for slirp to finish setup
+				<-slirpReadyChan
+				return ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, orgValue, 0644)
+			})
+			if err != nil {
+				logrus.Warnf("failed to set net.ipv6.conf.default.accept_dad sysctl: %v", err)
+			}
+		}()
+	}
+
 	if err := cmd.Start(); err != nil {
 		return errors.Wrapf(err, "failed to start slirp4netns process")
 	}
@@ -310,6 +346,9 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
 	if err := waitForSync(syncR, cmd, logFile, 1*time.Second); err != nil {
 		return err
 	}
+	if slirpReadyChan != nil {
+		slirpReadyChan <- struct{}{}
+	}
 
 	// Set a default slirp subnet.
Parsing a string with the net helper is easier than building the struct myself _, ctr.slirp4netnsSubnet, _ = net.ParseCIDR(defaultSlirp4netnsSubnet) diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index 84707732b..8d3b19b88 100644 --- a/test/e2e/run_networking_test.go +++ b/test/e2e/run_networking_test.go @@ -357,6 +357,26 @@ var _ = Describe("Podman run networking", func() { Expect(ncBusy).To(ExitWithError()) }) + It("podman run slirp4netns verify net.ipv6.conf.default.accept_dad=0", func() { + session := podmanTest.Podman([]string{"run", "--network", "slirp4netns:enable_ipv6=true", ALPINE, "ip", "addr"}) + session.Wait(30) + Expect(session).Should(Exit(0)) + // check the ipv6 setup id done without delay (https://github.com/containers/podman/issues/11062) + Expect(session.OutputToString()).To(ContainSubstring("inet6 fd00::")) + + const ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/all/accept_dad" + + cat := SystemExec("cat", []string{ipv6ConfDefaultAcceptDadSysctl}) + cat.Wait(30) + Expect(cat).Should(Exit(0)) + sysctlValue := cat.OutputToString() + + session = podmanTest.Podman([]string{"run", "--network", "slirp4netns:enable_ipv6=true", ALPINE, "cat", ipv6ConfDefaultAcceptDadSysctl}) + session.Wait(30) + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(sysctlValue)) + }) + It("podman run network expose host port 18082 to container port 8000 using slirp4netns port handler", func() { session := podmanTest.Podman([]string{"run", "--network", "slirp4netns:port_handler=slirp4netns", "-dt", "-p", "18082:8000", ALPINE, "/bin/sh"}) session.Wait(30) -- cgit v1.2.3-54-g00ecf From 246782133cc36e66db0f4facccd7dabcd76a92d5 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Wed, 27 Oct 2021 09:44:54 +0200 Subject: runtime: check for pause pid existence check that the pause pid exists before trying to move it to a separate scope. Closes: https://github.com/containers/podman/issues/12065 Signed-off-by: Giuseppe Scrivano --- libpod/runtime.go | 6 +++++- test/e2e/system_reset_test.go | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'libpod') diff --git a/libpod/runtime.go b/libpod/runtime.go index c22d87324..a99f55fb3 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -541,7 +541,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { return err } if became { - utils.MovePauseProcessToScope(pausePid) + // Check if the pause process was created. If it was created, then + // move it to its own systemd scope. + if _, err = os.Stat(pausePid); err == nil { + utils.MovePauseProcessToScope(pausePid) + } os.Exit(ret) } } diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go index 102526a46..93ab166cd 100644 --- a/test/e2e/system_reset_test.go +++ b/test/e2e/system_reset_test.go @@ -60,6 +60,8 @@ var _ = Describe("podman system reset", func() { session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) + Expect(session.ErrorToString()).To(Not(ContainSubstring("Failed to add pause process"))) + // If remote then the API service should have exited // On local tests this is a noop podmanTest.StartRemoteService() -- cgit v1.2.3-54-g00ecf From 2d6252b98a94482346cc8dd16f97b4c59d16dc4d Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 28 Oct 2021 11:18:48 +0200 Subject: runtime: change PID existence check commit 6b3b0a17c625bdf71b0ec8b783b288886d8e48d7 introduced a check for the PID file before attempting to move the PID to a new scope. 
This is still vulnerable to TOCTOU race condition though, since the PID file or the PID can be removed/killed after the check was successful but before it was used. Closes: https://github.com/containers/podman/issues/12065 [NO NEW TESTS NEEDED] it fixes a CI flake Signed-off-by: Giuseppe Scrivano --- libpod/runtime.go | 4 +--- utils/utils.go | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'libpod') diff --git a/libpod/runtime.go b/libpod/runtime.go index a99f55fb3..a208b6db4 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -543,9 +543,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { if became { // Check if the pause process was created. If it was created, then // move it to its own systemd scope. - if _, err = os.Stat(pausePid); err == nil { - utils.MovePauseProcessToScope(pausePid) - } + utils.MovePauseProcessToScope(pausePid) os.Exit(ret) } } diff --git a/utils/utils.go b/utils/utils.go index 55af41bb3..109ae088b 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -14,6 +14,7 @@ import ( "github.com/containers/podman/v3/libpod/define" "github.com/containers/podman/v3/pkg/cgroups" "github.com/containers/storage/pkg/archive" + "github.com/godbus/dbus/v5" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -177,13 +178,26 @@ func RunsOnSystemd() bool { func moveProcessToScope(pidPath, slice, scope string) error { data, err := ioutil.ReadFile(pidPath) if err != nil { + // do not raise an error if the file doesn't exist + if os.IsNotExist(err) { + return nil + } return errors.Wrapf(err, "cannot read pid file %s", pidPath) } pid, err := strconv.ParseUint(string(data), 10, 0) if err != nil { return errors.Wrapf(err, "cannot parse pid file %s", pidPath) } - return RunUnderSystemdScope(int(pid), slice, scope) + err = RunUnderSystemdScope(int(pid), slice, scope) + + // If the PID is not valid anymore, do not return an error. + if dbusErr, ok := err.(dbus.Error); ok { + if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { + return nil + } + } + + return err } // MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to -- cgit v1.2.3-54-g00ecf From 6bf6d72372005447a3f4a09f31b08e0202f99bc4 Mon Sep 17 00:00:00 2001 From: Adrian Reber Date: Wed, 27 Oct 2021 13:38:43 +0000 Subject: Set Checkpointed state to false after restore A restored container still had the state set to 'Checkpointed: true' which seems wrong if it running again. [NO NEW TESTS NEEDED] Signed-off-by: Adrian Reber --- libpod/container_internal_linux.go | 1 + 1 file changed, 1 insertion(+) (limited to 'libpod') diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 310110679..208e089b5 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -1483,6 +1483,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti logrus.Debugf("Restored container %s", c.ID()) c.state.State = define.ContainerStateRunning + c.state.Checkpointed = false if !options.Keep { // Delete all checkpoint related files. At this point, in theory, all files -- cgit v1.2.3-54-g00ecf From c8b7ca2ba2ef9a57103b5c05e6e29b54b0c6fa48 Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Fri, 29 Oct 2021 10:28:38 +0200 Subject: pod/container create: resolve conflicts of generated names Address the TOCTOU when generating random names by having at most 10 attempts to assign a random name when creating a pod or container. 
[NO TESTS NEEDED] since I do not know a way to force a conflict with randomly generated names in a reasonable time frame. Fixes: #11735 Signed-off-by: Valentin Rothberg --- libpod/container_internal.go | 24 +++++++++++++++++++++--- libpod/runtime_ctr.go | 9 --------- libpod/runtime_pod_linux.go | 40 ++++++++++++++++++++++++++-------------- 3 files changed, 47 insertions(+), 26 deletions(-) (limited to 'libpod') diff --git a/libpod/container_internal.go b/libpod/container_internal.go index d71179017..54d6b1303 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -479,9 +479,27 @@ func (c *Container) setupStorage(ctx context.Context) error { c.setupStorageMapping(&options.IDMappingOptions, &c.config.IDMappings) - containerInfo, err := c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options) - if err != nil { - return errors.Wrapf(err, "error creating container storage") + // Unless the user has specified a name, use a randomly generated one. + // Note that name conflicts may occur (see #11735), so we need to loop. + generateName := c.config.Name == "" + var containerInfo ContainerInfo + var containerInfoErr error + for { + if generateName { + name, err := c.runtime.generateName() + if err != nil { + return err + } + c.config.Name = name + } + containerInfo, containerInfoErr = c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options) + + if !generateName || errors.Cause(containerInfoErr) != storage.ErrDuplicateName { + break + } + } + if containerInfoErr != nil { + return errors.Wrapf(containerInfoErr, "error creating container storage") } c.config.IDMappings.UIDMap = containerInfo.UIDMap diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 7d3891f6e..f27e854a4 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -322,15 +322,6 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } } - if ctr.config.Name == "" { - name, err := r.generateName() - if err != nil { - return nil, err - } - - ctr.config.Name = name - } - // Check CGroup parent sanity, and set it if it was not set. // Only if we're actually configuring CGroups. if !ctr.config.NoCgroups { diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go index 3a6098de8..de00eb8e0 100644 --- a/libpod/runtime_pod_linux.go +++ b/libpod/runtime_pod_linux.go @@ -43,18 +43,6 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option } } - if pod.config.Name == "" { - name, err := r.generateName() - if err != nil { - return nil, err - } - pod.config.Name = name - } - - if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" { - p.InfraContainerSpec.Hostname = pod.config.Name - } - // Allocate a lock for the pod lock, err := r.lockManager.AllocateLock() if err != nil { @@ -131,9 +119,33 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option logrus.Infof("Pod has an infra container, but shares no namespaces") } - if err := r.state.AddPod(pod); err != nil { - return nil, errors.Wrapf(err, "error adding pod to state") + // Unless the user has specified a name, use a randomly generated one. + // Note that name conflicts may occur (see #11735), so we need to loop. 
+ generateName := pod.config.Name == "" + var addPodErr error + for { + if generateName { + name, err := r.generateName() + if err != nil { + return nil, err + } + pod.config.Name = name + } + + if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" { + p.InfraContainerSpec.Hostname = pod.config.Name + } + if addPodErr = r.state.AddPod(pod); addPodErr == nil { + return pod, nil + } + if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) { + break + } + } + if addPodErr != nil { + return nil, errors.Wrapf(addPodErr, "error adding pod to state") } + return pod, nil } -- cgit v1.2.3-54-g00ecf From de852ebd0561be53e6f9d1a6b2ec9f35d1aab9bb Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 9 Nov 2021 14:07:49 +1100 Subject: shm_lock: Handle ENOSPC better in AllocateSemaphore When starting a container libpod/runtime_pod_linux.go:NewPod calls libpod/lock/lock.go:AllocateLock ends up in here. If you exceed num_locks, in response to a "podman run ..." you will see: Error: error allocating lock for new container: no space left on device As noted inline, this error is technically true as it is talking about the SHM area, but for anyone who has not dug into the source (i.e. me, before a few hours ago :) your initial thought is going to be that your disk is full. I spent quite a bit of time trying to diagnose what disk, partition, overlay, etc. was filling up before I realised this was actually due to leaking from failing containers. This overrides this case to give a more explicit message that hopefully puts people on the right track to fixing this faster. You will now see: $ ./bin/podman run --rm -it fedora bash Error: error allocating lock for new container: allocation failed; exceeded num_locks (20) [NO NEW TESTS NEEDED] (just changes an existing error message) Signed-off-by: Ian Wienand --- libpod/lock/shm/shm_lock.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'libpod') diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index 322e92a8f..fea02a619 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -130,8 +130,17 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { // semaphore indexes, and can still return error codes. retCode := C.allocate_semaphore(locks.lockStruct) if retCode < 0 { + var err = syscall.Errno(-1 * retCode) // Negative errno returned - return 0, syscall.Errno(-1 * retCode) + if errors.Is(err, syscall.ENOSPC) { + // ENOSPC expands to "no space left on device". While it is technically true + // that there's no room in the SHM inn for this lock, this tends to send normal people + // down the path of checking disk-space which is not actually their problem. + // Give a clue that it's actually due to num_locks filling up. 
+ var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks) + return uint32(retCode), errFull + } + return uint32(retCode), syscall.Errno(-1 * retCode) } return uint32(retCode), nil -- cgit v1.2.3-54-g00ecf From cedf1a3d43fbb1f0c40cd286ee6c50863560e627 Mon Sep 17 00:00:00 2001 From: Boaz Shuster Date: Sun, 7 Nov 2021 13:21:25 +0200 Subject: podman-generate-kube - remove empty structs from YAML [NO NEW TESTS NEEDED] Signed-off-by: Boaz Shuster --- libpod/kube.go | 78 ++++++++++++++++++++++++++++++++++++++-- pkg/domain/infra/abi/generate.go | 3 +- 2 files changed, 77 insertions(+), 4 deletions(-) (limited to 'libpod') diff --git a/libpod/kube.go b/libpod/kube.go index d2ac15749..b34734513 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -166,9 +166,83 @@ func (v *Volume) GenerateForKube() *v1.PersistentVolumeClaim { } } +// YAMLPodSpec represents the same k8s API core PodSpec struct with a small +// change and that is having Containers as a pointer to YAMLContainer. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLPodSpec struct { + v1.PodSpec + Containers []*YAMLContainer `json:"containers"` +} + +// YAMLPod represents the same k8s API core Pod struct with a small +// change and that is having Spec as a pointer to YAMLPodSpec and +// Status as a pointer to k8s API core PodStatus. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLPod struct { + v1.Pod + Spec *YAMLPodSpec `json:"spec,omitempty"` + Status *v1.PodStatus `json:"status,omitempty"` +} + +// YAMLService represents the same k8s API core Service struct with a small +// change and that is having Status as a pointer to k8s API core ServiceStatus. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLService struct { + v1.Service + Status *v1.ServiceStatus `json:"status,omitempty"` +} + +// YAMLContainer represents the same k8s API core Container struct with a small +// change and that is having Resources as a pointer to k8s API core ResourceRequirements. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. 
Fixes: GH-11998 +type YAMLContainer struct { + v1.Container + Resources *v1.ResourceRequirements `json:"resources,omitempty"` +} + +// ConvertV1PodToYAMLPod takes k8s API core Pod and returns a pointer to YAMLPod +func ConvertV1PodToYAMLPod(pod *v1.Pod) *YAMLPod { + cs := []*YAMLContainer{} + for _, cc := range pod.Spec.Containers { + var res *v1.ResourceRequirements = nil + if len(cc.Resources.Limits) > 0 || len(cc.Resources.Requests) > 0 { + res = &cc.Resources + } + cs = append(cs, &YAMLContainer{Container: cc, Resources: res}) + } + mpo := &YAMLPod{Pod: *pod} + mpo.Spec = &YAMLPodSpec{PodSpec: (*pod).Spec, Containers: cs} + for _, ctr := range pod.Spec.Containers { + if ctr.SecurityContext == nil || ctr.SecurityContext.SELinuxOptions == nil { + continue + } + selinuxOpts := ctr.SecurityContext.SELinuxOptions + if selinuxOpts.User == "" && selinuxOpts.Role == "" && selinuxOpts.Type == "" && selinuxOpts.Level == "" { + ctr.SecurityContext.SELinuxOptions = nil + } + } + dnsCfg := pod.Spec.DNSConfig + if dnsCfg != nil && (len(dnsCfg.Nameservers)+len(dnsCfg.Searches)+len(dnsCfg.Options) > 0) { + mpo.Spec.DNSConfig = dnsCfg + } + status := pod.Status + if status.Phase != "" || len(status.Conditions) > 0 || + status.Message != "" || status.Reason != "" || + status.NominatedNodeName != "" || status.HostIP != "" || + status.PodIP != "" || status.StartTime != nil || + len(status.InitContainerStatuses) > 0 || len(status.ContainerStatuses) > 0 || status.QOSClass != "" || len(status.EphemeralContainerStatuses) > 0 { + mpo.Status = &status + } + return mpo +} + // GenerateKubeServiceFromV1Pod creates a v1 service object from a v1 pod object -func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1.Service { - service := v1.Service{} +func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) YAMLService { + service := YAMLService{} selector := make(map[string]string) selector["app"] = pod.Labels["app"] ports := servicePorts diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go index 081a2464b..a4d6bcf86 100644 --- a/pkg/domain/infra/abi/generate.go +++ b/pkg/domain/infra/abi/generate.go @@ -124,8 +124,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, if err != nil { return nil, err } - - b, err := generateKubeYAML(po) + b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po)) if err != nil { return nil, err } -- cgit v1.2.3-54-g00ecf From 6d9b1e4b8e7f72981cabcf576a8771ff458c0385 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Fri, 12 Nov 2021 09:52:28 -0500 Subject: Fix partial log line handling with journald log driver Patch originally by Paul Holzinger (sourced from [1]). This is necessary to get the tests to pass in order to include a batch of other, related journald fixes in `podman logs`. 
[1] https://github.com/containers/podman/pull/12274#issuecomment-967168173 Signed-off-by: Matthew Heon --- libpod/container_log_linux.go | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'libpod') diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go index 4029d0af7..b6b780bab 100644 --- a/libpod/container_log_linux.go +++ b/libpod/container_log_linux.go @@ -140,6 +140,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption doTail = false } lastReadCursor := "" + partial := "" for { select { case <-ctx.Done(): @@ -229,6 +230,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption logrus.Errorf("Failed parse log line: %v", err) return } + if logLine.Partial() { + partial += logLine.Msg + continue + } + logLine.Msg = partial + logLine.Msg + partial = "" if doTail { tailQueue = append(tailQueue, logLine) continue -- cgit v1.2.3-54-g00ecf
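Taken together, the partial handling added above reduces to the
following sketch, using the logs.LogLine type from the diffs; the
channel plumbing is illustrative, not the real call path:

    // joinPartialLines buffers journald partial fragments and emits each
    // logical log line exactly once, with its fragments prepended.
    func joinPartialLines(in <-chan *logs.LogLine, out chan<- *logs.LogLine) {
    	partial := ""
    	for line := range in {
    		if line.Partial() {
    			partial += line.Msg // buffer the fragment
    			continue
    		}
    		// complete line: prepend any buffered fragments
    		line.Msg = partial + line.Msg
    		partial = ""
    		out <- line
    	}
    }

Combined with the tail handling earlier in the series, this is what lets
`podman logs --tail` count logical lines rather than raw journal entries.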