Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/container_internal.go  | 25 |
-rw-r--r-- | libpod/container_log_linux.go |  6 |
-rw-r--r-- | libpod/kube.go                | 64 |
-rw-r--r-- | libpod/runtime_pod_linux.go   | 12 |
4 files changed, 79 insertions, 28 deletions
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index a7315dd55..994ffeec7 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -17,12 +17,14 @@ import (
 	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/pkg/overlay"
 	butil "github.com/containers/buildah/util"
+	"github.com/containers/common/pkg/chown"
 	"github.com/containers/podman/v3/libpod/define"
 	"github.com/containers/podman/v3/libpod/events"
 	"github.com/containers/podman/v3/pkg/cgroups"
 	"github.com/containers/podman/v3/pkg/ctime"
 	"github.com/containers/podman/v3/pkg/hooks"
 	"github.com/containers/podman/v3/pkg/hooks/exec"
+	"github.com/containers/podman/v3/pkg/lookup"
 	"github.com/containers/podman/v3/pkg/rootless"
 	"github.com/containers/podman/v3/pkg/selinux"
 	"github.com/containers/podman/v3/pkg/util"
@@ -497,8 +499,12 @@ func (c *Container) setupStorage(ctx context.Context) error {
 		return errors.Wrapf(err, "error creating container storage")
 	}
 
-	c.config.IDMappings.UIDMap = containerInfo.UIDMap
-	c.config.IDMappings.GIDMap = containerInfo.GIDMap
+	// only reconfig IDMappings if layer was mounted from storage
+	// if its a external overlay do not reset IDmappings
+	if !c.config.RootfsOverlay {
+		c.config.IDMappings.UIDMap = containerInfo.UIDMap
+		c.config.IDMappings.GIDMap = containerInfo.GIDMap
+	}
 
 	processLabel, err := c.processLabel(containerInfo.ProcessLabel)
 	if err != nil {
@@ -1527,6 +1533,19 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
 		}
 		mountPoint = overlayMount.Source
+		execUser, err := lookup.GetUserGroupInfo(mountPoint, c.config.User, nil)
+		if err != nil {
+			return "", err
+		}
+		hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), uint32(execUser.Uid), uint32(execUser.Gid))
+		if err != nil {
+			return "", errors.Wrap(err, "unable to get host UID and host GID")
+		}
+
+		//note: this should not be recursive, if using external rootfs users should be responsible on configuring ownership.
+		if err := chown.ChangeHostPathOwnership(mountPoint, false, int(hostUID), int(hostGID)); err != nil {
+			return "", err
+		}
 	}
 
 	if mountPoint == "" {
@@ -2096,7 +2115,7 @@ func (c *Container) checkReadyForRemoval() error {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
 	}
 
-	if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+	if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
 	}
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index ca1e11ef5..562169ce2 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -91,8 +91,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
 	var cursorError error
 	for i := 1; i <= 3; i++ {
 		cursor, cursorError = journal.GetCursor()
+		hundreds := 1
+		for j := 1; j < i; j++ {
+			hundreds *= 2
+		}
 		if cursorError != nil {
-			time.Sleep(time.Duration(i*100) * time.Millisecond)
+			time.Sleep(time.Duration(hundreds*100) * time.Millisecond)
 			continue
 		}
 		break
diff --git a/libpod/kube.go b/libpod/kube.go
index d47f47f1c..02ac8ed98 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -332,7 +332,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta
 		InitContainers: initCtrs,
 		Volumes:        volumes,
 	}
-	if dnsOptions != nil {
+	if dnsOptions != nil && (len(dnsOptions.Nameservers)+len(dnsOptions.Searches)+len(dnsOptions.Options) > 0) {
 		ps.DNSConfig = dnsOptions
 	}
 	p := v1.Pod{
@@ -447,11 +447,6 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 		kubeVolumes = append(kubeVolumes, volumes...)
 	}
 
-	envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
-	if err != nil {
-		return kubeContainer, kubeVolumes, nil, annotations, err
-	}
-
 	portmappings, err := c.PortMappings()
 	if err != nil {
 		return kubeContainer, kubeVolumes, nil, annotations, err
@@ -489,15 +484,23 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 		kubeContainer.Command = nil
 	}
 
+	if c.WorkingDir() != "/" && imgData.Config.WorkingDir != c.WorkingDir() {
+		kubeContainer.WorkingDir = c.WorkingDir()
+	}
+
 	if imgData.User == c.User() {
 		kubeSec.RunAsGroup, kubeSec.RunAsUser = nil, nil
 	}
 
-	kubeContainer.WorkingDir = c.WorkingDir()
+	envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env, imgData.Config.Env)
+	if err != nil {
+		return kubeContainer, kubeVolumes, nil, annotations, err
+	}
+	kubeContainer.Env = envVariables
+
 	kubeContainer.Ports = ports
 	// This should not be applicable
 	//container.EnvFromSource =
-	kubeContainer.Env = envVariables
 	kubeContainer.SecurityContext = kubeSec
 	kubeContainer.StdinOnce = false
 	kubeContainer.TTY = c.config.Spec.Process.Terminal
@@ -600,9 +603,14 @@ func ocicniPortMappingToContainerPort(portMappings []types.OCICNIPortMapping) ([
 }
 
 // libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
-func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
+func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar, error) {
 	defaultEnv := env.DefaultEnvVariables()
 	envVars := make([]v1.EnvVar, 0, len(envs))
+	imageMap := make(map[string]string, len(imageEnvs))
+	for _, ie := range imageEnvs {
+		split := strings.SplitN(ie, "=", 2)
+		imageMap[split[0]] = split[1]
+	}
 	for _, e := range envs {
 		split := strings.SplitN(e, "=", 2)
 		if len(split) != 2 {
@@ -611,6 +619,9 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
 		if defaultEnv[split[0]] == split[1] {
 			continue
 		}
+		if imageMap[split[0]] == split[1] {
+			continue
+		}
 		ev := v1.EnvVar{
 			Name:  split[0],
 			Value: split[1],
@@ -808,33 +819,42 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 		capabilities = newCaps
 	}
 
+	sc := v1.SecurityContext{
+		// RunAsNonRoot is an optional parameter; our first implementations should be root only; however
+		// I'm leaving this as a bread-crumb for later
+		//RunAsNonRoot:             &nonRoot,
+	}
+	if capabilities != nil {
+		sc.Capabilities = capabilities
+	}
 	var selinuxOpts v1.SELinuxOptions
 	opts := strings.SplitN(c.config.Spec.Annotations[define.InspectAnnotationLabel], ":", 2)
-	if len(opts) == 2 {
+	switch len(opts) {
+	case 2:
 		switch opts[0] {
 		case "type":
 			selinuxOpts.Type = opts[1]
+			sc.SELinuxOptions = &selinuxOpts
 		case "level":
 			selinuxOpts.Level = opts[1]
+			sc.SELinuxOptions = &selinuxOpts
 		}
-	}
-	if len(opts) == 1 {
+	case 1:
 		if opts[0] == "disable" {
 			selinuxOpts.Type = "spc_t"
+			sc.SELinuxOptions = &selinuxOpts
 		}
 	}
 
-	sc := v1.SecurityContext{
-		Capabilities:   capabilities,
-		Privileged:     &privileged,
-		SELinuxOptions: &selinuxOpts,
-		// RunAsNonRoot is an optional parameter; our first implementations should be root only; however
-		// I'm leaving this as a bread-crumb for later
-		//RunAsNonRoot:             &nonRoot,
-		ReadOnlyRootFilesystem:   &ro,
-		AllowPrivilegeEscalation: &allowPrivEscalation,
+	if !allowPrivEscalation {
+		sc.AllowPrivilegeEscalation = &allowPrivEscalation
+	}
+	if privileged {
+		sc.Privileged = &privileged
+	}
+	if ro {
+		sc.ReadOnlyRootFilesystem = &ro
 	}
-
 	if c.User() != "" {
 		if !c.batched {
 			c.lock.Lock()
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 9c6f1539f..7d7fef4d1 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -177,10 +177,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 	if err != nil {
 		return err
 	}
-	numCtrs := len(ctrs)
 
-	// If the only container in the pod is the pause container, remove the pod and container unconditionally.
+	// If the only running container in the pod is the pause container, remove the pod and container unconditionally.
 	pauseCtrID := p.state.InfraContainerID
 	if numCtrs == 1 && ctrs[0].ID() == pauseCtrID {
 		removeCtrs = true
@@ -264,6 +263,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		}
 	}
 
+	// Clear infra container ID before we remove the infra container.
+	// There is a potential issue if we don't do that, and removal is
+	// interrupted between RemoveAllContainers() below and the pod's removal
+	// later - we end up with a reference to a nonexistent infra container.
+	p.state.InfraContainerID = ""
+	if err := p.save(); err != nil {
+		return err
+	}
+
 	// Remove all containers in the pod from the state.
 	if err := r.state.RemovePodContainers(p); err != nil {
 		// If this fails, there isn't much more we can do.
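
Aside: the container_log_linux.go hunk above replaces a linear retry delay with one that doubles after each failed attempt (100ms, 200ms, 400ms). The standalone sketch below only illustrates that backoff pattern; fetchCursor is a hypothetical stand-in for journal.GetCursor() and none of this code is part of the commits shown here.

package main

import (
	"errors"
	"fmt"
	"time"
)

// attempts tracks how many times fetchCursor has been called.
var attempts int

// fetchCursor is a hypothetical stand-in for journal.GetCursor():
// it fails twice before succeeding, just to exercise the retry loop.
func fetchCursor() (string, error) {
	attempts++
	if attempts < 3 {
		return "", errors.New("cursor not ready")
	}
	return "s=abc123", nil
}

func main() {
	var (
		cursor string
		err    error
	)
	// Retry up to three times, doubling the sleep after each failure
	// (100ms, 200ms, 400ms), like the loop in readFromJournal.
	delay := 100 * time.Millisecond
	for i := 1; i <= 3; i++ {
		cursor, err = fetchCursor()
		if err != nil {
			time.Sleep(delay)
			delay *= 2
			continue
		}
		break
	}
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("got cursor:", cursor)
}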