path: root/libpod
author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2021-09-30 08:35:07 -0400
committer  GitHub <noreply@github.com>  2021-09-30 08:35:07 -0400
commit     d8bdbf5b66c860a73f5d4e301535c0ee40d8d719 (patch)
tree       ee5e4c6705632064406f49147d2e5fb51947d300 /libpod
parent     f0ae84f5120f7ec8d2fc16c6ae9e52725b5d4958 (diff)
parent     855746cc9258b85d390d68cd3c61ca0588dd0f8f (diff)
Merge pull request #11792 from mheon/340_final
Backports + release notes for v3.4.0 final
Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go                17
-rw-r--r--  libpod/container_copy_linux.go         2
-rw-r--r--  libpod/container_internal.go           7
-rw-r--r--  libpod/container_path_resolution.go    8
-rw-r--r--  libpod/kube.go                        39
-rw-r--r--  libpod/network/cni/cni_exec.go        12
-rw-r--r--  libpod/networking_slirp4netns.go       2
-rw-r--r--  libpod/oci_conmon_linux.go             2
-rw-r--r--  libpod/pod.go                          6
-rw-r--r--  libpod/pod_api.go                     44
-rw-r--r--  libpod/runtime_cstorage.go             6
11 files changed, 93 insertions(+), 52 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5df3e8961..160f428d7 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1756,6 +1756,23 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
}
+ if ctr.config.Pod != "" {
+ podsBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+ podBkt := podsBkt.Bucket([]byte(ctr.config.Pod))
+ if podBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "bucket for pod %s does not exist", ctr.config.Pod)
+ }
+ podCtrBkt := podBkt.Bucket(containersBkt)
+ if podCtrBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", ctr.config.Pod)
+ }
+ if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in pod %s members bucket", ctr.ID(), ctr.config.Pod)
+ }
+ }
}
}
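
The hunk above keeps a renamed container's name in sync inside its pod's nested members bucket, all within the same transaction as the rename itself. A minimal sketch of the same nested-bucket update written directly against bbolt (bucket names here are hypothetical, not the DB schema used by podman):

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// renameInPod mirrors the hunk above: walk pods/<podID>/containers and
// store the new name under the container's ID, inside one transaction.
func renameInPod(db *bolt.DB, podID, ctrID, newName string) error {
	return db.Update(func(tx *bolt.Tx) error {
		podsBkt := tx.Bucket([]byte("pods")) // hypothetical top-level bucket name
		if podsBkt == nil {
			return fmt.Errorf("no pods bucket")
		}
		podBkt := podsBkt.Bucket([]byte(podID))
		if podBkt == nil {
			return fmt.Errorf("bucket for pod %s does not exist", podID)
		}
		ctrsBkt := podBkt.Bucket([]byte("containers")) // hypothetical sub-bucket name
		if ctrsBkt == nil {
			return fmt.Errorf("pod %s has no containers bucket", podID)
		}
		return ctrsBkt.Put([]byte(ctrID), []byte(newName))
	})
}
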
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index a35824289..7d4dd0d46 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -174,7 +174,7 @@ func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Wr
// getContainerUser returns the specs.User and ID mappings of the container.
func getContainerUser(container *Container, mountPoint string) (specs.User, error) {
- userspec := container.Config().User
+ userspec := container.config.User
uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec)
u := specs.User{
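
This and several of the following hunks replace the public Config() accessor with the private config field. The accessor returns a deep copy of the whole configuration, which is wasted work on internal paths that only read a field. A sketch of the accessor pattern (simplified; not the exact podman code):

import "encoding/json"

// Config returns a deep copy so callers outside libpod cannot mutate
// the container's live configuration; internal code that already holds
// the container lock reads c.config directly and skips the copy.
func (c *Container) Config() *ContainerConfig {
	ret := new(ContainerConfig)
	// Deep copy via a JSON round trip (stand-in for podman's helper).
	data, err := json.Marshal(c.config)
	if err != nil {
		return nil
	}
	if err := json.Unmarshal(data, ret); err != nil {
		return nil
	}
	return ret
}
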
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 18b80475b..2ca49758d 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -982,12 +982,11 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
}
// Check the status
- conf := depCtr.Config()
state, err := depCtr.State()
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
- if state != define.ContainerStateRunning && !conf.IsInfra {
+ if state != define.ContainerStateRunning && !depCtr.config.IsInfra {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@@ -1063,7 +1062,7 @@ func (c *Container) cniHosts() string {
var hosts string
if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
- hosts += fmt.Sprintf("%s\t%s %s\n", ipAddress, c.Hostname(), c.Config().Name)
+ hosts += fmt.Sprintf("%s\t%s %s\n", ipAddress, c.Hostname(), c.config.Name)
}
return hosts
}
@@ -2127,7 +2126,7 @@ func (c *Container) canWithPrevious() error {
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
- if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
+ if _, err := metadata.WriteJSONFile(c.config, c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index ec7306ca1..bb2ef1a73 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -112,7 +112,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string
func findVolume(c *Container, containerPath string) (*Volume, error) {
runtime := c.Runtime()
cleanedContainerPath := filepath.Clean(containerPath)
- for _, vol := range c.Config().NamedVolumes {
+ for _, vol := range c.config.NamedVolumes {
if cleanedContainerPath == filepath.Clean(vol.Dest) {
return runtime.GetVolume(vol.Name)
}
@@ -124,7 +124,7 @@ func findVolume(c *Container, containerPath string) (*Volume, error) {
// Volume's destination.
func isPathOnVolume(c *Container, containerPath string) bool {
cleanedContainerPath := filepath.Clean(containerPath)
- for _, vol := range c.Config().NamedVolumes {
+ for _, vol := range c.config.NamedVolumes {
if cleanedContainerPath == filepath.Clean(vol.Dest) {
return true
}
@@ -141,7 +141,7 @@ func isPathOnVolume(c *Container, containerPath string) bool {
// path of a Mount. Returns a matching Mount or nil.
func findBindMount(c *Container, containerPath string) *specs.Mount {
cleanedPath := filepath.Clean(containerPath)
- for _, m := range c.Config().Spec.Mounts {
+ for _, m := range c.config.Spec.Mounts {
if m.Type != "bind" {
continue
}
@@ -157,7 +157,7 @@ func findBindMount(c *Container, containerPath string) *specs.Mount {
// Mount's destination.
func isPathOnBindMount(c *Container, containerPath string) bool {
cleanedContainerPath := filepath.Clean(containerPath)
- for _, m := range c.Config().Spec.Mounts {
+ for _, m := range c.config.Spec.Mounts {
if cleanedContainerPath == filepath.Clean(m.Destination) {
return true
}
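
All four helpers in this file share the same matching idiom: clean both paths before comparing, so trailing slashes and redundant elements cannot cause false negatives. Reduced to an illustrative helper (not part of the diff):

import "path/filepath"

// matchesDest reports whether containerPath refers to mount destination
// dest; Clean normalizes both sides, so "/data/" matches "/data" and
// "/data/./sub" matches "/data/sub".
func matchesDest(containerPath, dest string) bool {
	return filepath.Clean(containerPath) == filepath.Clean(dest)
}
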
diff --git a/libpod/kube.go b/libpod/kube.go
index af3b0916e..25f672c28 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -1,9 +1,11 @@
package libpod
import (
+ "context"
"fmt"
"math/rand"
"os"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -27,14 +29,14 @@ import (
// GenerateForKube takes a slice of libpod containers and generates
// one v1.Pod description that includes just a single container.
-func GenerateForKube(ctrs []*Container) (*v1.Pod, error) {
+func GenerateForKube(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
// Generate the v1.Pod yaml description
- return simplePodWithV1Containers(ctrs)
+ return simplePodWithV1Containers(ctx, ctrs)
}
// GenerateForKube takes a slice of libpod containers and generates
// one v1.Pod description
-func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
+func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
ports []v1.ContainerPort //nolint
@@ -78,7 +80,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
servicePorts = containerPortsToServicePorts(ports)
hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host))
}
- pod, err := p.podWithContainers(allContainers, ports, hostNetwork)
+ pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork)
if err != nil {
return nil, servicePorts, err
}
@@ -88,7 +90,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
// so set it here
for _, ctr := range allContainers {
if !ctr.IsInfra() {
- switch ctr.Config().RestartPolicy {
+ switch ctr.config.RestartPolicy {
case define.RestartPolicyAlways:
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
case define.RestartPolicyOnFailure:
@@ -218,7 +220,7 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
return sps
}
-func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) {
+func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) {
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
podContainers := make([]v1.Container, 0, len(containers))
@@ -239,7 +241,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
isInit := ctr.IsInitCtr()
- ctr, volumes, _, err := containerToV1Container(ctr)
+ ctr, volumes, _, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -251,7 +253,9 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
// We add the original port declarations from the libpod infra container
// to the first kubernetes container description because otherwise we lose
// the original container/port bindings.
- if first && len(ports) > 0 {
+ // Add the port configuration to the first regular container or the first
+ // init container if only init containers have been created in the pod.
+ if first && len(ports) > 0 && (!isInit || len(containers) == 2) {
ctr.Ports = ports
first = false
}
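
The new `!isInit || len(containers) == 2` condition encodes the rule from the added comment. Restated as a standalone predicate (hypothetical helper):

// carriesPorts restates the condition above. The count includes the
// infra container, so numContainers == 2 means the pod holds only the
// infra container plus a single init container, and that init
// container must receive the port declarations.
func carriesPorts(first, isInit bool, numPorts, numContainers int) bool {
	return first && numPorts > 0 && (!isInit || numContainers == 2)
}
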
@@ -267,7 +271,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
deDupPodVolumes[vol.Name] = &vol
}
} else {
- _, _, infraDNS, err := containerToV1Container(ctr)
+ _, _, infraDNS, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -337,7 +341,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta
// simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated
// for a single container. We "insert" that container description in a pod.
-func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
+func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
kubeCtrs := make([]v1.Container, 0, len(ctrs))
kubeInitCtrs := []v1.Container{}
kubeVolumes := make([]v1.Volume, 0)
@@ -355,7 +359,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
if !ctr.HostNetwork() {
hostNetwork = false
}
- kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctr)
+ kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -411,7 +415,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
// containerToV1Container converts information we know about a libpod container
// to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) {
+func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) {
kubeContainer := v1.Container{}
kubeVolumes := []v1.Volume{}
kubeSec, err := generateKubeSecurityContext(c)
@@ -463,6 +467,17 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNS
_, image := c.Image()
kubeContainer.Image = image
kubeContainer.Stdin = c.Stdin()
+ img, _, err := c.runtime.libimageRuntime.LookupImage(image, nil)
+ if err != nil {
+ return kubeContainer, kubeVolumes, nil, err
+ }
+ imgData, err := img.Inspect(ctx, false)
+ if err != nil {
+ return kubeContainer, kubeVolumes, nil, err
+ }
+ if reflect.DeepEqual(imgData.Config.Cmd, kubeContainer.Command) {
+ kubeContainer.Command = nil
+ }
kubeContainer.WorkingDir = c.WorkingDir()
kubeContainer.Ports = ports
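
The final kube.go hunk is why the context parameter was threaded through: the generated YAML should omit a container Command that merely repeats the image's default CMD, and detecting that requires inspecting the image. The comparison, reduced to a sketch (helper name is hypothetical):

import "reflect"

// pruneDefaultCommand drops an explicit command that is identical to
// the image's default CMD, so the generated YAML relies on the image
// default instead of pinning a redundant command.
func pruneDefaultCommand(imageCmd, ctrCommand []string) []string {
	if reflect.DeepEqual(imageCmd, ctrCommand) {
		return nil
	}
	return ctrCommand
}
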
diff --git a/libpod/network/cni/cni_exec.go b/libpod/network/cni/cni_exec.go
index c4d7f49f7..ae857bcfb 100644
--- a/libpod/network/cni/cni_exec.go
+++ b/libpod/network/cni/cni_exec.go
@@ -30,6 +30,7 @@ import (
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/version"
+ "github.com/containers/podman/v3/pkg/rootless"
)
type cniExec struct {
@@ -67,6 +68,17 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [
c.Stdout = stdout
c.Stderr = stderr
+ // The dnsname plugin tries to use XDG_RUNTIME_DIR to store files.
+ // podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use
+ // it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process
+ // for rootful users. This causes issues since the cleanup process is spawned
+ // by conmon and thus does not have XDG_RUNTIME_DIR set to the same value as
+ // podman run. Because of this, dnsname will not find the config files and
+ // cannot clean up correctly. To fix this, we also unset XDG_RUNTIME_DIR for
+ // the cni plugins when running rootful.
+ if !rootless.IsRootless() {
+ c.Env = append(c.Env, "XDG_RUNTIME_DIR=")
+ }
+
err := c.Run()
if err != nil {
return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes())
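
The added branch blanks XDG_RUNTIME_DIR for the plugin process when running rootful by appending an empty assignment to the environment. An alternative standalone sketch (hypothetical helper) strips the variable instead, avoiding any reliance on duplicate-entry precedence:

import (
	"os"
	"os/exec"
	"strings"
)

// runWithoutRuntimeDir launches a plugin with XDG_RUNTIME_DIR removed
// from its environment, so the child cannot pick up a stale value.
func runWithoutRuntimeDir(path string, args ...string) error {
	env := make([]string, 0, len(os.Environ()))
	for _, kv := range os.Environ() {
		if strings.HasPrefix(kv, "XDG_RUNTIME_DIR=") {
			continue
		}
		env = append(env, kv)
	}
	cmd := exec.Command(path, args...)
	cmd.Env = env
	return cmd.Run()
}
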
diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index a09027b72..07c3aae3c 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -222,7 +222,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
- havePortMapping := len(ctr.Config().PortMappings) > 0
+ havePortMapping := len(ctr.config.PortMappings) > 0
logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID))
ctrNetworkSlipOpts := []string{}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 8a823e4fc..c2b472f76 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1148,7 +1148,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() {
if ctr.config.PostConfigureNetNS {
- havePortMapping := len(ctr.Config().PortMappings) > 0
+ havePortMapping := len(ctr.config.PortMappings) > 0
if havePortMapping {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
diff --git a/libpod/pod.go b/libpod/pod.go
index 0d5d629cd..d9db06285 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -104,8 +104,7 @@ func (p *Pod) PidMode() string {
if err != nil {
return ""
}
- conf := infra.Config()
- ctrSpec := conf.Spec
+ ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == specs.PIDNamespace {
@@ -126,8 +125,7 @@ func (p *Pod) UserNSMode() string {
if err != nil {
return ""
}
- conf := infra.Config()
- ctrSpec := conf.Spec
+ ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == specs.UserNamespace {
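
PidMode and UserNSMode both walk the infra container's OCI spec for a namespace entry of the matching type. The shape of that lookup, reduced (return values are illustrative; podman decorates joined paths further):

import spec "github.com/opencontainers/runtime-spec/specs-go"

// namespaceMode reports how a namespace of type t is configured in an
// OCI spec: the join path if one is set, "private" for a fresh
// namespace, or "host" when no entry exists.
func namespaceMode(s *spec.Spec, t spec.LinuxNamespaceType) string {
	if s == nil || s.Linux == nil {
		return ""
	}
	for _, ns := range s.Linux.Namespaces {
		if ns.Type == t {
			if ns.Path != "" {
				return ns.Path
			}
			return "private"
		}
	}
	return "host"
}
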
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 4c3b1b0b7..cd0ac4ca6 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -34,7 +34,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
}
// If the container is a once init container, we need to remove it
// after it runs
- if initCon.Config().InitContainerType == define.OneShotInitContainer {
+ if initCon.config.InitContainerType == define.OneShotInitContainer {
icLock := initCon.lock
icLock.Lock()
if err := p.runtime.removeContainer(ctx, initCon, false, false, true); err != nil {
@@ -588,37 +588,37 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
return nil, err
}
infraConfig = new(define.InspectPodInfraConfig)
- infraConfig.HostNetwork = !infra.Config().ContainerNetworkConfig.UseImageHosts
- infraConfig.StaticIP = infra.Config().ContainerNetworkConfig.StaticIP
- infraConfig.NoManageResolvConf = infra.Config().UseImageResolvConf
- infraConfig.NoManageHosts = infra.Config().UseImageHosts
+ infraConfig.HostNetwork = !infra.config.ContainerNetworkConfig.UseImageHosts
+ infraConfig.StaticIP = infra.config.ContainerNetworkConfig.StaticIP
+ infraConfig.NoManageResolvConf = infra.config.UseImageResolvConf
+ infraConfig.NoManageHosts = infra.config.UseImageHosts
infraConfig.PidNS = p.PidMode()
infraConfig.UserNS = p.UserNSMode()
- if len(infra.Config().ContainerNetworkConfig.DNSServer) > 0 {
- infraConfig.DNSServer = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSServer))
- for _, entry := range infra.Config().ContainerNetworkConfig.DNSServer {
+ if len(infra.config.ContainerNetworkConfig.DNSServer) > 0 {
+ infraConfig.DNSServer = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSServer))
+ for _, entry := range infra.config.ContainerNetworkConfig.DNSServer {
infraConfig.DNSServer = append(infraConfig.DNSServer, entry.String())
}
}
- if len(infra.Config().ContainerNetworkConfig.DNSSearch) > 0 {
- infraConfig.DNSSearch = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSSearch))
- infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.Config().ContainerNetworkConfig.DNSSearch...)
+ if len(infra.config.ContainerNetworkConfig.DNSSearch) > 0 {
+ infraConfig.DNSSearch = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSSearch))
+ infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.config.ContainerNetworkConfig.DNSSearch...)
}
- if len(infra.Config().ContainerNetworkConfig.DNSOption) > 0 {
- infraConfig.DNSOption = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSOption))
- infraConfig.DNSOption = append(infraConfig.DNSOption, infra.Config().ContainerNetworkConfig.DNSOption...)
+ if len(infra.config.ContainerNetworkConfig.DNSOption) > 0 {
+ infraConfig.DNSOption = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSOption))
+ infraConfig.DNSOption = append(infraConfig.DNSOption, infra.config.ContainerNetworkConfig.DNSOption...)
}
- if len(infra.Config().HostAdd) > 0 {
- infraConfig.HostAdd = make([]string, 0, len(infra.Config().HostAdd))
- infraConfig.HostAdd = append(infraConfig.HostAdd, infra.Config().HostAdd...)
+ if len(infra.config.HostAdd) > 0 {
+ infraConfig.HostAdd = make([]string, 0, len(infra.config.HostAdd))
+ infraConfig.HostAdd = append(infraConfig.HostAdd, infra.config.HostAdd...)
}
- if len(infra.Config().ContainerNetworkConfig.Networks) > 0 {
- infraConfig.Networks = make([]string, 0, len(infra.Config().ContainerNetworkConfig.Networks))
- infraConfig.Networks = append(infraConfig.Networks, infra.Config().ContainerNetworkConfig.Networks...)
+ if len(infra.config.ContainerNetworkConfig.Networks) > 0 {
+ infraConfig.Networks = make([]string, 0, len(infra.config.ContainerNetworkConfig.Networks))
+ infraConfig.Networks = append(infraConfig.Networks, infra.config.ContainerNetworkConfig.Networks...)
}
- infraConfig.NetworkOptions = infra.Config().ContainerNetworkConfig.NetworkOptions
- infraConfig.PortBindings = makeInspectPortBindings(infra.Config().ContainerNetworkConfig.PortMappings, nil)
+ infraConfig.NetworkOptions = infra.config.ContainerNetworkConfig.NetworkOptions
+ infraConfig.PortBindings = makeInspectPortBindings(infra.config.ContainerNetworkConfig.PortMappings, nil)
}
inspectData := define.InspectPodData{
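
Throughout this hunk the DNS and host slices are copied into fresh allocations before landing in the inspect output, so a caller mutating the result cannot reach back into the container's live config. The idiom, as a generic illustration:

// copyStrings returns a fresh slice so the inspect result does not
// alias the backing array owned by the container's config.
func copyStrings(in []string) []string {
	if len(in) == 0 {
		return nil
	}
	return append(make([]string, 0, len(in)), in...)
}
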
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index cd2f226af..58bd67e6d 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -106,18 +106,18 @@ func (r *Runtime) removeStorageContainer(idOrName string, force bool) error {
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error looking up container %q mounts", idOrName)
+ logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error unmounting container %q", idOrName)
+ logrus.Warnf("Unmounting container %q while attempting to delete storage: %v", idOrName, err)
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
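
The errors.Cause-to-errors.Is switch matters once wrapping is mixed: github.com/pkg/errors' Cause only unwraps its own wrappers, while the standard library's errors.Is walks any chain exposing Unwrap, including fmt.Errorf's %w. A standalone demonstration:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for storage.ErrContainerUnknown.
var errContainerUnknown = errors.New("container unknown")

func main() {
	wrapped := fmt.Errorf("unmounting container: %w", errContainerUnknown)
	// errors.Is matches anywhere in the chain, regardless of whether
	// the wrapping came from pkg/errors or fmt.Errorf's %w verb.
	fmt.Println(errors.Is(wrapped, errContainerUnknown)) // true
}
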