From 57c055f61a96fa49f8a8709297da5e81597e4b48 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Thu, 16 Sep 2021 06:13:21 -0400 Subject: Ignore mount errors except ErrContainerUnknown when cleaningup container Fixes: https://github.com/containers/podman/issues/11207 [NO TESTS NEEDED] Since I don't know how to get into this situation. Signed-off-by: Daniel J Walsh --- libpod/runtime_cstorage.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'libpod') diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go index cd2f226af..58bd67e6d 100644 --- a/libpod/runtime_cstorage.go +++ b/libpod/runtime_cstorage.go @@ -106,18 +106,18 @@ func (r *Runtime) removeStorageContainer(idOrName string, force bool) error { logrus.Infof("Storage for container %s already removed", ctr.ID) return nil } - return errors.Wrapf(err, "error looking up container %q mounts", idOrName) + logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err) } if timesMounted > 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName) } } else if _, err := r.store.Unmount(ctr.ID, true); err != nil { - if errors.Cause(err) == storage.ErrContainerUnknown { + if errors.Is(err, storage.ErrContainerUnknown) { // Container again gone, no error logrus.Infof("Storage for container %s already removed", ctr.ID) return nil } - return errors.Wrapf(err, "error unmounting container %q", idOrName) + logrus.Warnf("Unmounting container %q while attempting to delete storage: %v", idOrName, err) } if err := r.store.DeleteContainer(ctr.ID); err != nil { -- cgit v1.2.3-54-g00ecf From 0f87cfd288e48f55cb16e8ed8839485d22f0764c Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Wed, 22 Sep 2021 13:39:18 -0400 Subject: podman generate kube should not include images command If the command came from the underlying image, then we should not include it in the generate yaml file. Fixes: https://github.com/containers/podman/issues/11672 Signed-off-by: Daniel J Walsh --- libpod/kube.go | 33 +++++++++++++++++++++++---------- pkg/domain/infra/abi/generate.go | 8 ++++---- test/e2e/generate_kube_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 14 deletions(-) (limited to 'libpod') diff --git a/libpod/kube.go b/libpod/kube.go index af3b0916e..e6b9b0477 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -1,9 +1,11 @@ package libpod import ( + "context" "fmt" "math/rand" "os" + "reflect" "sort" "strconv" "strings" @@ -27,14 +29,14 @@ import ( // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description that includes just a single container. 
-func GenerateForKube(ctrs []*Container) (*v1.Pod, error) { +func GenerateForKube(ctx context.Context, ctrs []*Container) (*v1.Pod, error) { // Generate the v1.Pod yaml description - return simplePodWithV1Containers(ctrs) + return simplePodWithV1Containers(ctx, ctrs) } // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description -func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) { +func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, error) { // Generate the v1.Pod yaml description var ( ports []v1.ContainerPort //nolint @@ -78,7 +80,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) { servicePorts = containerPortsToServicePorts(ports) hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host)) } - pod, err := p.podWithContainers(allContainers, ports, hostNetwork) + pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork) if err != nil { return nil, servicePorts, err } @@ -218,7 +220,7 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort { return sps } -func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) { +func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) { deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) @@ -239,7 +241,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor isInit := ctr.IsInitCtr() - ctr, volumes, _, err := containerToV1Container(ctr) + ctr, volumes, _, err := containerToV1Container(ctx, ctr) if err != nil { return nil, err } @@ -267,7 +269,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor deDupPodVolumes[vol.Name] = &vol } } else { - _, _, infraDNS, err := containerToV1Container(ctr) + _, _, infraDNS, err := containerToV1Container(ctx, ctr) if err != nil { return nil, err } @@ -337,7 +339,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta // simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated // for a single container. we "insert" that container description in a pod. -func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) { +func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeInitCtrs := []v1.Container{} kubeVolumes := make([]v1.Volume, 0) @@ -355,7 +357,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) { if !ctr.HostNetwork() { hostNetwork = false } - kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctr) + kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctx, ctr) if err != nil { return nil, err } @@ -411,7 +413,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) { // containerToV1Container converts information we know about a libpod container // to a V1.Container specification. 
-func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) { +func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) { kubeContainer := v1.Container{} kubeVolumes := []v1.Volume{} kubeSec, err := generateKubeSecurityContext(c) @@ -463,6 +465,17 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNS _, image := c.Image() kubeContainer.Image = image kubeContainer.Stdin = c.Stdin() + img, _, err := c.runtime.libimageRuntime.LookupImage(image, nil) + if err != nil { + return kubeContainer, kubeVolumes, nil, err + } + imgData, err := img.Inspect(ctx, false) + if err != nil { + return kubeContainer, kubeVolumes, nil, err + } + if reflect.DeepEqual(imgData.Config.Cmd, kubeContainer.Command) { + kubeContainer.Command = nil + } kubeContainer.WorkingDir = c.WorkingDir() kubeContainer.Ports = ports diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go index 1e614ce58..081a2464b 100644 --- a/pkg/domain/infra/abi/generate.go +++ b/pkg/domain/infra/abi/generate.go @@ -107,7 +107,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, // Generate kube pods and services from pods. if len(pods) >= 1 { - pos, svcs, err := getKubePods(pods, options.Service) + pos, svcs, err := getKubePods(ctx, pods, options.Service) if err != nil { return nil, err } @@ -120,7 +120,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, // Generate the kube pods from containers. if len(ctrs) >= 1 { - po, err := libpod.GenerateForKube(ctrs) + po, err := libpod.GenerateForKube(ctx, ctrs) if err != nil { return nil, err } @@ -153,12 +153,12 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, } // getKubePods returns kube pod and service YAML files from podman pods. -func getKubePods(pods []*libpod.Pod, getService bool) ([][]byte, [][]byte, error) { +func getKubePods(ctx context.Context, pods []*libpod.Pod, getService bool) ([][]byte, [][]byte, error) { pos := [][]byte{} svcs := [][]byte{} for _, p := range pods { - po, sp, err := p.GenerateForKube() + po, sp, err := p.GenerateForKube(ctx) if err != nil { return nil, nil, err } diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index bf89a0708..cb556991c 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -792,6 +792,45 @@ var _ = Describe("Podman generate kube", func() { Expect(containers[0].Args).To(Equal([]string{"10s"})) }) + It("podman generate kube - no command", func() { + session := podmanTest.Podman([]string{"create", "--name", "test", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + kube := podmanTest.Podman([]string{"generate", "kube", "test"}) + kube.WaitWithDefaultTimeout() + Expect(kube).Should(Exit(0)) + + // Now make sure that the container's command is not set to the + // entrypoint and it's arguments to "10s". 
+ pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + containers := pod.Spec.Containers + Expect(len(containers)).To(Equal(1)) + Expect(len(containers[0].Command)).To(Equal(0)) + + cmd := []string{"echo", "hi"} + session = podmanTest.Podman(append([]string{"create", "--name", "test1", ALPINE}, cmd...)) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + kube = podmanTest.Podman([]string{"generate", "kube", "test1"}) + kube.WaitWithDefaultTimeout() + Expect(kube).Should(Exit(0)) + + // Now make sure that the container's command is not set to the + // entrypoint and it's arguments to "10s". + pod = new(v1.Pod) + err = yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + containers = pod.Spec.Containers + Expect(len(containers)).To(Equal(1)) + Expect(containers[0].Command).To(Equal(cmd)) + }) + It("podman generate kube - use entrypoint from image", func() { // Build an image with an entrypoint. containerfile := `FROM quay.io/libpod/alpine:latest -- cgit v1.2.3-54-g00ecf From 31df5b78fcdf3a492ef063eb1b98a3b4715e5969 Mon Sep 17 00:00:00 2001 From: Paul Holzinger Date: Fri, 24 Sep 2021 10:44:46 +0200 Subject: rootful: do not set XDG_RUNTIME_DIR for cni plugins The dnsname plugin tries to use XDG_RUNTIME_DIR to store files. podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process for rootful users. This causes issues since the cleanup process is spawned by conmon and thus not have XDG_RUNTIME_DIR set to same value as podman run. Because of it dnsname will not find the config files and cannot correctly cleanup. To fix this we should also unset XDG_RUNTIME_DIR for the cni plugins as rootful. Signed-off-by: Paul Holzinger --- libpod/network/cni/cni_exec.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'libpod') diff --git a/libpod/network/cni/cni_exec.go b/libpod/network/cni/cni_exec.go index c4d7f49f7..ae857bcfb 100644 --- a/libpod/network/cni/cni_exec.go +++ b/libpod/network/cni/cni_exec.go @@ -30,6 +30,7 @@ import ( "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/version" + "github.com/containers/podman/v3/pkg/rootless" ) type cniExec struct { @@ -67,6 +68,17 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ c.Stdout = stdout c.Stderr = stderr + // The dnsname plugin tries to use XDG_RUNTIME_DIR to store files. + // podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use + // it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process + // for rootful users. This causes issues since the cleanup process is spawned + // by conmon and thus not have XDG_RUNTIME_DIR set to same value as podman run. + // Because of it dnsname will not find the config files and cannot correctly cleanup. + // To fix this we should also unset XDG_RUNTIME_DIR for the cni plugins as rootful. + if !rootless.IsRootless() { + c.Env = append(c.Env, "XDG_RUNTIME_DIR=") + } + err := c.Run() if err != nil { return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes()) -- cgit v1.2.3-54-g00ecf From ba74d6e6ecc2cc8e1480a466e29a078a93e24afa Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Tue, 28 Sep 2021 17:01:22 +0200 Subject: libpod: do not call (*container).Config() Access the container's config field directly inside of libpod instead of calling `Config()` which in turn creates expensive JSON deep copies. 
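As an illustration of that cost (using simplified, hypothetical types rather than libpod's real ones), an accessor that deep-copies by round-tripping through JSON pays a marshal and an unmarshal on every call, while code inside the same package can simply read the unexported field:

package main

import (
	"encoding/json"
	"fmt"
)

type ContainerConfig struct {
	Name string
	User string
}

type Container struct {
	config *ContainerConfig
}

// Config returns a defensive deep copy of the configuration by encoding it
// to JSON and decoding it again, which allocates on every call.
func (c *Container) Config() *ContainerConfig {
	data, err := json.Marshal(c.config)
	if err != nil {
		return nil
	}
	copied := new(ContainerConfig)
	if err := json.Unmarshal(data, copied); err != nil {
		return nil
	}
	return copied
}

func main() {
	c := &Container{config: &ContainerConfig{Name: "test", User: "root"}}

	fmt.Println(c.Config().User) // external callers get a safe copy, at a price
	fmt.Println(c.config.User)   // same-package callers read the field directly
}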
Accessing the field directly drops memory consumption of a simple `podman run --rm busybox true` from 1245kB to 410kB. [NO TESTS NEEDED] Signed-off-by: Valentin Rothberg --- libpod/container_copy_linux.go | 2 +- libpod/container_internal.go | 7 +++--- libpod/container_path_resolution.go | 8 +++---- libpod/kube.go | 2 +- libpod/networking_slirp4netns.go | 2 +- libpod/oci_conmon_linux.go | 2 +- libpod/pod.go | 6 ++--- libpod/pod_api.go | 44 ++++++++++++++++++------------------- 8 files changed, 35 insertions(+), 38 deletions(-) (limited to 'libpod') diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go index a35824289..7d4dd0d46 100644 --- a/libpod/container_copy_linux.go +++ b/libpod/container_copy_linux.go @@ -174,7 +174,7 @@ func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Wr // getContainerUser returns the specs.User and ID mappings of the container. func getContainerUser(container *Container, mountPoint string) (specs.User, error) { - userspec := container.Config().User + userspec := container.config.User uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec) u := specs.User{ diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 18b80475b..2ca49758d 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -982,12 +982,11 @@ func (c *Container) checkDependenciesRunning() ([]string, error) { } // Check the status - conf := depCtr.Config() state, err := depCtr.State() if err != nil { return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID()) } - if state != define.ContainerStateRunning && !conf.IsInfra { + if state != define.ContainerStateRunning && !depCtr.config.IsInfra { notRunning = append(notRunning, dep) } depCtrs[dep] = depCtr @@ -1063,7 +1062,7 @@ func (c *Container) cniHosts() string { var hosts string if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 { ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0] - hosts += fmt.Sprintf("%s\t%s %s\n", ipAddress, c.Hostname(), c.Config().Name) + hosts += fmt.Sprintf("%s\t%s %s\n", ipAddress, c.Hostname(), c.config.Name) } return hosts } @@ -2127,7 +2126,7 @@ func (c *Container) canWithPrevious() error { // JSON files for later export func (c *Container) prepareCheckpointExport() error { // save live config - if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil { + if _, err := metadata.WriteJSONFile(c.config, c.bundlePath(), metadata.ConfigDumpFile); err != nil { return err } diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go index ec7306ca1..bb2ef1a73 100644 --- a/libpod/container_path_resolution.go +++ b/libpod/container_path_resolution.go @@ -112,7 +112,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string func findVolume(c *Container, containerPath string) (*Volume, error) { runtime := c.Runtime() cleanedContainerPath := filepath.Clean(containerPath) - for _, vol := range c.Config().NamedVolumes { + for _, vol := range c.config.NamedVolumes { if cleanedContainerPath == filepath.Clean(vol.Dest) { return runtime.GetVolume(vol.Name) } @@ -124,7 +124,7 @@ func findVolume(c *Container, containerPath string) (*Volume, error) { // Volume's destination. 
func isPathOnVolume(c *Container, containerPath string) bool { cleanedContainerPath := filepath.Clean(containerPath) - for _, vol := range c.Config().NamedVolumes { + for _, vol := range c.config.NamedVolumes { if cleanedContainerPath == filepath.Clean(vol.Dest) { return true } @@ -141,7 +141,7 @@ func isPathOnVolume(c *Container, containerPath string) bool { // path of a Mount. Returns a matching Mount or nil. func findBindMount(c *Container, containerPath string) *specs.Mount { cleanedPath := filepath.Clean(containerPath) - for _, m := range c.Config().Spec.Mounts { + for _, m := range c.config.Spec.Mounts { if m.Type != "bind" { continue } @@ -157,7 +157,7 @@ func findBindMount(c *Container, containerPath string) *specs.Mount { // Mount's destination. func isPathOnBindMount(c *Container, containerPath string) bool { cleanedContainerPath := filepath.Clean(containerPath) - for _, m := range c.Config().Spec.Mounts { + for _, m := range c.config.Spec.Mounts { if cleanedContainerPath == filepath.Clean(m.Destination) { return true } diff --git a/libpod/kube.go b/libpod/kube.go index e6b9b0477..b92753f1b 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -90,7 +90,7 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e // so set it at here for _, ctr := range allContainers { if !ctr.IsInfra() { - switch ctr.Config().RestartPolicy { + switch ctr.config.RestartPolicy { case define.RestartPolicyAlways: pod.Spec.RestartPolicy = v1.RestartPolicyAlways case define.RestartPolicyOnFailure: diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go index a09027b72..07c3aae3c 100644 --- a/libpod/networking_slirp4netns.go +++ b/libpod/networking_slirp4netns.go @@ -222,7 +222,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error { defer errorhandling.CloseQuiet(syncR) defer errorhandling.CloseQuiet(syncW) - havePortMapping := len(ctr.Config().PortMappings) > 0 + havePortMapping := len(ctr.config.PortMappings) > 0 logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID)) ctrNetworkSlipOpts := []string{} diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 8a823e4fc..c2b472f76 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -1148,7 +1148,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() { if ctr.config.PostConfigureNetNS { - havePortMapping := len(ctr.Config().PortMappings) > 0 + havePortMapping := len(ctr.config.PortMappings) > 0 if havePortMapping { ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe() if err != nil { diff --git a/libpod/pod.go b/libpod/pod.go index 0d5d629cd..d9db06285 100644 --- a/libpod/pod.go +++ b/libpod/pod.go @@ -104,8 +104,7 @@ func (p *Pod) PidMode() string { if err != nil { return "" } - conf := infra.Config() - ctrSpec := conf.Spec + ctrSpec := infra.config.Spec if ctrSpec != nil && ctrSpec.Linux != nil { for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == specs.PIDNamespace { @@ -126,8 +125,7 @@ func (p *Pod) UserNSMode() string { if err != nil { return "" } - conf := infra.Config() - ctrSpec := conf.Spec + ctrSpec := infra.config.Spec if ctrSpec != nil && ctrSpec.Linux != nil { for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == specs.UserNamespace { diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 4c3b1b0b7..cd0ac4ca6 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -34,7 +34,7 
@@ func (p *Pod) startInitContainers(ctx context.Context) error { } // If the container is a once init container, we need to remove it // after it runs - if initCon.Config().InitContainerType == define.OneShotInitContainer { + if initCon.config.InitContainerType == define.OneShotInitContainer { icLock := initCon.lock icLock.Lock() if err := p.runtime.removeContainer(ctx, initCon, false, false, true); err != nil { @@ -588,37 +588,37 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) { return nil, err } infraConfig = new(define.InspectPodInfraConfig) - infraConfig.HostNetwork = !infra.Config().ContainerNetworkConfig.UseImageHosts - infraConfig.StaticIP = infra.Config().ContainerNetworkConfig.StaticIP - infraConfig.NoManageResolvConf = infra.Config().UseImageResolvConf - infraConfig.NoManageHosts = infra.Config().UseImageHosts + infraConfig.HostNetwork = !infra.config.ContainerNetworkConfig.UseImageHosts + infraConfig.StaticIP = infra.config.ContainerNetworkConfig.StaticIP + infraConfig.NoManageResolvConf = infra.config.UseImageResolvConf + infraConfig.NoManageHosts = infra.config.UseImageHosts infraConfig.PidNS = p.PidMode() infraConfig.UserNS = p.UserNSMode() - if len(infra.Config().ContainerNetworkConfig.DNSServer) > 0 { - infraConfig.DNSServer = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSServer)) - for _, entry := range infra.Config().ContainerNetworkConfig.DNSServer { + if len(infra.config.ContainerNetworkConfig.DNSServer) > 0 { + infraConfig.DNSServer = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSServer)) + for _, entry := range infra.config.ContainerNetworkConfig.DNSServer { infraConfig.DNSServer = append(infraConfig.DNSServer, entry.String()) } } - if len(infra.Config().ContainerNetworkConfig.DNSSearch) > 0 { - infraConfig.DNSSearch = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSSearch)) - infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.Config().ContainerNetworkConfig.DNSSearch...) + if len(infra.config.ContainerNetworkConfig.DNSSearch) > 0 { + infraConfig.DNSSearch = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSSearch)) + infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.config.ContainerNetworkConfig.DNSSearch...) } - if len(infra.Config().ContainerNetworkConfig.DNSOption) > 0 { - infraConfig.DNSOption = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSOption)) - infraConfig.DNSOption = append(infraConfig.DNSOption, infra.Config().ContainerNetworkConfig.DNSOption...) + if len(infra.config.ContainerNetworkConfig.DNSOption) > 0 { + infraConfig.DNSOption = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSOption)) + infraConfig.DNSOption = append(infraConfig.DNSOption, infra.config.ContainerNetworkConfig.DNSOption...) } - if len(infra.Config().HostAdd) > 0 { - infraConfig.HostAdd = make([]string, 0, len(infra.Config().HostAdd)) - infraConfig.HostAdd = append(infraConfig.HostAdd, infra.Config().HostAdd...) + if len(infra.config.HostAdd) > 0 { + infraConfig.HostAdd = make([]string, 0, len(infra.config.HostAdd)) + infraConfig.HostAdd = append(infraConfig.HostAdd, infra.config.HostAdd...) } - if len(infra.Config().ContainerNetworkConfig.Networks) > 0 { - infraConfig.Networks = make([]string, 0, len(infra.Config().ContainerNetworkConfig.Networks)) - infraConfig.Networks = append(infraConfig.Networks, infra.Config().ContainerNetworkConfig.Networks...) 
+ if len(infra.config.ContainerNetworkConfig.Networks) > 0 { + infraConfig.Networks = make([]string, 0, len(infra.config.ContainerNetworkConfig.Networks)) + infraConfig.Networks = append(infraConfig.Networks, infra.config.ContainerNetworkConfig.Networks...) } - infraConfig.NetworkOptions = infra.Config().ContainerNetworkConfig.NetworkOptions - infraConfig.PortBindings = makeInspectPortBindings(infra.Config().ContainerNetworkConfig.PortMappings, nil) + infraConfig.NetworkOptions = infra.config.ContainerNetworkConfig.NetworkOptions + infraConfig.PortBindings = makeInspectPortBindings(infra.config.ContainerNetworkConfig.PortMappings, nil) } inspectData := define.InspectPodData{ -- cgit v1.2.3-54-g00ecf From 5064fd519cfd09395f9632ecb94492949b7a8367 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Tue, 28 Sep 2021 14:12:18 -0400 Subject: Ensure pod ID bucket is properly updated on rename As we were not updating the pod ID bucket, removing a pod with containers still in it (including the infra container, which will always suffer from this) will not properly update the name registry to remove the name of any renamed containers. This patch ensures that does not happen - all containers will be fully removed, even if renamed. Fixes #11750 Signed-off-by: Matthew Heon --- libpod/boltdb_state.go | 17 +++++++++++++++++ test/e2e/rename_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'libpod') diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 5df3e8961..160f428d7 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -1756,6 +1756,23 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID()) } + if ctr.config.Pod != "" { + podsBkt, err := getPodBucket(tx) + if err != nil { + return err + } + podBkt := podsBkt.Bucket([]byte(ctr.config.Pod)) + if podBkt == nil { + return errors.Wrapf(define.ErrInternal, "bucket for pod %s does not exist", ctr.config.Pod) + } + podCtrBkt := podBkt.Bucket(containersBkt) + if podCtrBkt == nil { + return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", ctr.config.Pod) + } + if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { + return errors.Wrapf(err, "error renaming container %s in pod %s members bucket", ctr.ID(), ctr.config.Pod) + } + } } } diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go index 0bd1792c9..e5e69c25c 100644 --- a/test/e2e/rename_test.go +++ b/test/e2e/rename_test.go @@ -111,4 +111,29 @@ var _ = Describe("podman rename", func() { Expect(ps).Should(Exit(0)) Expect(ps.OutputToString()).To(ContainSubstring(newName)) }) + + It("Rename a container that is part of a pod", func() { + podName := "testPod" + infraName := "infra1" + pod := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--infra-name", infraName}) + pod.WaitWithDefaultTimeout() + Expect(pod).Should(Exit(0)) + + infraName2 := "infra2" + rename := podmanTest.Podman([]string{"rename", infraName, infraName2}) + rename.WaitWithDefaultTimeout() + Expect(rename).Should(Exit(0)) + + remove := podmanTest.Podman([]string{"pod", "rm", "-f", podName}) + remove.WaitWithDefaultTimeout() + Expect(remove).Should(Exit(0)) + + create := podmanTest.Podman([]string{"create", "--name", infraName2, ALPINE, "top"}) + create.WaitWithDefaultTimeout() + Expect(create).Should(Exit(0)) + + create2 
:= podmanTest.Podman([]string{"create", "--name", infraName, ALPINE, "top"}) + create2.WaitWithDefaultTimeout() + Expect(create2).Should(Exit(0)) + }) }) -- cgit v1.2.3-54-g00ecf From 7d72e83240890c338dc3d2b17295ba09f9878707 Mon Sep 17 00:00:00 2001 From: Urvashi Mohnani Date: Mon, 27 Sep 2021 15:12:47 -0400 Subject: [NO TESTS NEEDED] Add port configuration to first regular container When generating a kube yaml and there is a port configuration add the configuration to the first regular container in the pod and not to the init container. Signed-off-by: Urvashi Mohnani --- libpod/kube.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'libpod') diff --git a/libpod/kube.go b/libpod/kube.go index b92753f1b..25f672c28 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -253,7 +253,9 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po // We add the original port declarations from the libpod infra container // to the first kubernetes container description because otherwise we loose // the original container/port bindings. - if first && len(ports) > 0 { + // Add the port configuration to the first regular container or the first + // init container if only init containers have been created in the pod. + if first && len(ports) > 0 && (!isInit || len(containers) == 2) { ctr.Ports = ports first = false } -- cgit v1.2.3-54-g00ecf
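A note on the errors.Cause to errors.Is change in the first patch above: errors.Cause only follows the github.com/pkg/errors Causer chain, while errors.Is also understands fmt.Errorf %w wrapping and walks the whole chain. A minimal, self-contained sketch, with a standard-library sentinel standing in for storage.ErrContainerUnknown:

package main

import (
	"errors"
	"fmt"
)

var errContainerUnknown = errors.New("container not known")

// lookup returns the sentinel wrapped twice, the way layered storage code
// typically adds context on the way up the call stack.
func lookup() error {
	return fmt.Errorf("looking up container: %w",
		fmt.Errorf("reading layer store: %w", errContainerUnknown))
}

func main() {
	err := lookup()

	fmt.Println(err == errContainerUnknown)          // false: a direct comparison misses the wrapped sentinel
	fmt.Println(errors.Is(err, errContainerUnknown)) // true: errors.Is unwraps the whole chain
}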
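The generate kube change in the second patch drops the container command from the YAML when it merely repeats the image's default CMD. The comparison reduces to the small helper below; kubeCommand is a hypothetical name used only for this sketch, not a podman function:

package main

import (
	"fmt"
	"reflect"
)

// kubeCommand keeps the command only when the user overrode the image's
// default CMD; otherwise the generated YAML leaves Command unset so the
// image default still applies when the YAML is played.
func kubeCommand(imageCmd, ctrCommand []string) []string {
	if reflect.DeepEqual(imageCmd, ctrCommand) {
		return nil
	}
	return ctrCommand
}

func main() {
	imageCmd := []string{"/bin/sh"}

	fmt.Println(kubeCommand(imageCmd, []string{"/bin/sh"}))    // []: inherited from the image, omitted
	fmt.Println(kubeCommand(imageCmd, []string{"echo", "hi"})) // [echo hi]: user-specified, kept
}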
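The rename fix comes down to updating every bucket that records the container's name, not only the global one. A self-contained sketch of that shape, assuming the go.etcd.io/bbolt module; the bucket names and IDs are made up for illustration and are not podman's actual schema:

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

var (
	allCtrsBkt = []byte("all-containers")
	podsBkt    = []byte("pods")
	ctrsBkt    = []byte("containers")
)

func main() {
	db, err := bolt.Open("state.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctrID, podID := []byte("ctr1"), []byte("pod1")

	// Seed a state that stores the container's name in two places: the global
	// all-containers bucket and the pod's own containers sub-bucket.
	if err := db.Update(func(tx *bolt.Tx) error {
		all, err := tx.CreateBucketIfNotExists(allCtrsBkt)
		if err != nil {
			return err
		}
		pods, err := tx.CreateBucketIfNotExists(podsBkt)
		if err != nil {
			return err
		}
		pod, err := pods.CreateBucketIfNotExists(podID)
		if err != nil {
			return err
		}
		podCtrs, err := pod.CreateBucketIfNotExists(ctrsBkt)
		if err != nil {
			return err
		}
		if err := all.Put(ctrID, []byte("infra1")); err != nil {
			return err
		}
		return podCtrs.Put(ctrID, []byte("infra1"))
	}); err != nil {
		log.Fatal(err)
	}

	// A rename must touch both copies; updating only the global bucket leaves
	// the old name registered under the pod, which is what broke removal.
	newName := []byte("infra2")
	if err := db.Update(func(tx *bolt.Tx) error {
		if err := tx.Bucket(allCtrsBkt).Put(ctrID, newName); err != nil {
			return err
		}
		return tx.Bucket(podsBkt).Bucket(podID).Bucket(ctrsBkt).Put(ctrID, newName)
	}); err != nil {
		log.Fatal(err)
	}

	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Println(string(tx.Bucket(podsBkt).Bucket(podID).Bucket(ctrsBkt).Get(ctrID))) // infra2
		return nil
	})
}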
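Finally, the port-placement condition added in the last patch is terse: len(containers) still counts the infra container, so a length of 2 means the pod holds nothing but infra plus a single init container. A sketch with simplified, hypothetical types (not libpod's) showing where the ports end up:

package main

import "fmt"

type ctr struct {
	name    string
	isInfra bool
	isInit  bool
	ports   []int
}

// assignPorts mirrors the patch's check
//     first && len(ports) > 0 && (!isInit || len(containers) == 2)
// walking the containers once and attaching the pod's ports to the first
// eligible one.
func assignPorts(containers []ctr, ports []int) {
	first := true
	for i := range containers {
		if containers[i].isInfra {
			continue
		}
		if first && len(ports) > 0 && (!containers[i].isInit || len(containers) == 2) {
			containers[i].ports = ports
			first = false
		}
	}
}

func main() {
	ports := []int{8080}

	// An init container ahead of a regular container: the regular one gets the ports.
	pod := []ctr{{name: "infra", isInfra: true}, {name: "init", isInit: true}, {name: "app"}}
	assignPorts(pod, ports)
	fmt.Printf("%+v\n", pod)

	// Only an init container besides infra: the init container gets them.
	initOnly := []ctr{{name: "infra", isInfra: true}, {name: "init", isInit: true}}
	assignPorts(initOnly, ports)
	fmt.Printf("%+v\n", initOnly)
}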