-rw-r--r--   cmd/podman/root.go                       10
-rw-r--r--   libpod/kube.go                           32
-rw-r--r--   pkg/api/server/register_containers.go     7
-rw-r--r--   pkg/domain/entities/engine.go             1
-rw-r--r--   pkg/domain/infra/abi/play.go              5
-rw-r--r--   pkg/k8s.io/api/core/v1/types.go          12
-rw-r--r--   pkg/specgen/generate/validate.go         16
-rw-r--r--   test/e2e/generate_kube_test.go           38
-rw-r--r--   test/e2e/play_kube_test.go               33
-rw-r--r--   test/e2e/run_cpu_test.go                 16
-rw-r--r--   test/system/001-basic.bats                6
11 files changed, 158 insertions(+), 18 deletions(-)
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 0261cd670..2e00777a4 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -73,6 +73,7 @@ var (
defaultLogLevel = "warn"
logLevel = defaultLogLevel
+ dockerConfig = ""
debug bool
useSyslog bool
@@ -85,6 +86,7 @@ func init() {
loggingHook,
syslogHook,
earlyInitHook,
+ configHook,
)
rootFlags(rootCmd, registry.PodmanConfig())
@@ -311,6 +313,12 @@ func persistentPostRunE(cmd *cobra.Command, args []string) error {
return nil
}
+func configHook() {
+ if dockerConfig != "" {
+ logrus.Warn("The --config flag is ignored by Podman. Exists for Docker compatibility")
+ }
+}
+
func loggingHook() {
var found bool
if debug {
@@ -363,6 +371,8 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
lFlags.StringVarP(&opts.URI, "host", "H", uri, "Used for Docker compatibility")
_ = lFlags.MarkHidden("host")
+ lFlags.StringVar(&dockerConfig, "config", "", "Ignored for Docker compatibility")
+ _ = lFlags.MarkHidden("config")
// Context option added just for compatibility with DockerCLI.
lFlags.String("context", "default", "Name of the context to use to connect to the daemon (This flag is a NOOP and provided solely for scripting compatibility.)")
_ = lFlags.MarkHidden("context")
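Note: the root.go hunks above add a hidden --config flag that Podman accepts but never uses, warning once via a persistent hook. A minimal standalone sketch of that pattern (not the Podman source; the demo command name and hook placement are illustrative) could look like:

    package main

    import (
    	"github.com/sirupsen/logrus"
    	"github.com/spf13/cobra"
    )

    func main() {
    	var dockerConfig string
    	cmd := &cobra.Command{
    		Use: "demo",
    		// Warn before any subcommand runs, mirroring configHook above.
    		PersistentPreRun: func(cmd *cobra.Command, args []string) {
    			if dockerConfig != "" {
    				logrus.Warn("The --config flag is ignored. Exists for Docker compatibility")
    			}
    		},
    		Run: func(cmd *cobra.Command, args []string) {},
    	}
    	cmd.PersistentFlags().StringVar(&dockerConfig, "config", "", "Ignored for Docker compatibility")
    	_ = cmd.PersistentFlags().MarkHidden("config")
    	_ = cmd.Execute()
    }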
diff --git a/libpod/kube.go b/libpod/kube.go
index d4414aabd..1f4831006 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -62,6 +62,7 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e
extraHost := make([]v1.HostAlias, 0)
hostNetwork := false
+ hostUsers := true
if p.HasInfraContainer() {
infraContainer, err := p.getInfraContainer()
if err != nil {
@@ -87,8 +88,9 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e
return nil, servicePorts, err
}
hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host))
+ hostUsers = infraContainer.IDMappings().HostUIDMapping && infraContainer.IDMappings().HostGIDMapping
}
- pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork)
+ pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers)
if err != nil {
return nil, servicePorts, err
}
@@ -348,7 +350,7 @@ func containersToServicePorts(containers []v1.Container) ([]v1.ServicePort, erro
return sps, nil
}
-func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) {
+func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers bool) (*v1.Pod, error) {
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
podContainers := make([]v1.Container, 0, len(containers))
@@ -446,10 +448,11 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po
podVolumes,
&dnsInfo,
hostNetwork,
+ hostUsers,
hostname), nil
}
-func newPodObject(podName string, annotations map[string]string, initCtrs, containers []v1.Container, volumes []v1.Volume, dnsOptions *v1.PodDNSConfig, hostNetwork bool, hostname string) *v1.Pod {
+func newPodObject(podName string, annotations map[string]string, initCtrs, containers []v1.Container, volumes []v1.Volume, dnsOptions *v1.PodDNSConfig, hostNetwork, hostUsers bool, hostname string) *v1.Pod {
tm := v12.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -470,13 +473,19 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta
}
// Set enableServiceLinks to false as podman doesn't use the service port environment variables
enableServiceLinks := false
+ // Set automountServiceAccountToken to false as podman doesn't use service account tokens
+ automountServiceAccountToken := false
ps := v1.PodSpec{
- Containers: containers,
- Hostname: hostname,
- HostNetwork: hostNetwork,
- InitContainers: initCtrs,
- Volumes: volumes,
- EnableServiceLinks: &enableServiceLinks,
+ Containers: containers,
+ Hostname: hostname,
+ HostNetwork: hostNetwork,
+ InitContainers: initCtrs,
+ Volumes: volumes,
+ EnableServiceLinks: &enableServiceLinks,
+ AutomountServiceAccountToken: &automountServiceAccountToken,
+ }
+ if !hostUsers {
+ ps.HostUsers = &hostUsers
}
if dnsOptions != nil && (len(dnsOptions.Nameservers)+len(dnsOptions.Searches)+len(dnsOptions.Options) > 0) {
ps.DNSConfig = dnsOptions
@@ -495,6 +504,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
kubeCtrs := make([]v1.Container, 0, len(ctrs))
kubeInitCtrs := []v1.Container{}
kubeVolumes := make([]v1.Volume, 0)
+ hostUsers := true
hostNetwork := true
podDNS := v1.PodDNSConfig{}
kubeAnnotations := make(map[string]string)
@@ -524,6 +534,9 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
if !ctr.HostNetwork() {
hostNetwork = false
}
+ if !(ctr.IDMappings().HostUIDMapping && ctr.IDMappings().HostGIDMapping) {
+ hostUsers = false
+ }
kubeCtr, kubeVols, ctrDNS, annotations, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
@@ -585,6 +598,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
kubeVolumes,
&podDNS,
hostNetwork,
+ hostUsers,
hostname), nil
}
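Note: the libpod/kube.go hunks derive hostUsers from the container ID mappings and only set the PodSpec pointer when the pod has its own user namespace, so the default case stays out of the generated YAML. A rough sketch of that omit-the-default pattern, using a stand-in struct rather than the real libpod/k8s types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type podSpec struct {
    	HostUsers *bool `json:"hostUsers,omitempty"`
    }

    func specFor(hostUsers bool) podSpec {
    	ps := podSpec{}
    	if !hostUsers {
    		// Only serialize the field when it deviates from the default (true).
    		ps.HostUsers = &hostUsers
    	}
    	return ps
    }

    func main() {
    	a, _ := json.Marshal(specFor(true))
    	b, _ := json.Marshal(specFor(false))
    	fmt.Println(string(a)) // {}
    	fmt.Println(string(b)) // {"hostUsers":false}
    }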
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index 7e9c02816..311eecd17 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -897,7 +897,7 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// - in: query
// name: signal
// type: string
- // default: TERM
+ // default: SIGKILL
// description: signal to be sent to container, either by integer or SIG_ name
// produces:
// - application/json
@@ -1291,11 +1291,6 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// required: true
// description: the name or ID of the container
// - in: query
- // name: all
- // type: boolean
- // default: false
- // description: Stop all containers
- // - in: query
// name: timeout
// type: integer
// default: 10
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
index c1a4ffdf3..a69cf5111 100644
--- a/pkg/domain/entities/engine.go
+++ b/pkg/domain/entities/engine.go
@@ -33,6 +33,7 @@ type PodmanConfig struct {
*config.Config
*pflag.FlagSet
+ DockerConfig string // Used for Docker compatibility
CgroupUsage string // rootless code determines Usage message
ConmonPath string // --conmon flag will set Engine.ConmonPath
CPUProfile string // Hidden: Should CPU profile be taken
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 12786afcd..57d795682 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -355,6 +355,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if options.Userns == "" {
options.Userns = "host"
+ if podYAML.Spec.HostUsers != nil && !*podYAML.Spec.HostUsers {
+ options.Userns = "auto"
+ }
+ } else if podYAML.Spec.HostUsers != nil {
+ logrus.Info("overriding the user namespace mode in the pod spec")
}
// Validate the userns modes supported.
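Note: on the play side, the hunk above maps hostUsers: false to --userns=auto unless an explicit --userns was given, in which case the CLI value wins and an informational message is logged. A hedged sketch of just that precedence; resolveUserns is an illustrative helper, not a function in the Podman tree:

    package main

    import "fmt"

    // resolveUserns mirrors the precedence above: an explicit --userns value
    // from the CLI wins over the hostUsers field in the pod spec.
    func resolveUserns(cliUserns string, hostUsers *bool) string {
    	if cliUserns != "" {
    		return cliUserns
    	}
    	if hostUsers != nil && !*hostUsers {
    		return "auto"
    	}
    	return "host"
    }

    func main() {
    	no := false
    	fmt.Println(resolveUserns("", nil))        // host
    	fmt.Println(resolveUserns("", &no))        // auto
    	fmt.Println(resolveUserns("keep-id", &no)) // keep-id (CLI override)
    }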
diff --git a/pkg/k8s.io/api/core/v1/types.go b/pkg/k8s.io/api/core/v1/types.go
index d47178878..6f20cd351 100644
--- a/pkg/k8s.io/api/core/v1/types.go
+++ b/pkg/k8s.io/api/core/v1/types.go
@@ -1984,6 +1984,18 @@ type PodSpec struct {
// Default to false.
// +optional
SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"`
+ // Use the host's user namespace.
+ // Optional: Default to true.
+ // If set to true or not present, the pod will be run in the host user namespace, useful
+ // for when the pod needs a feature only available to the host user namespace, such as
+ // loading a kernel module with CAP_SYS_MODULE.
+ // When set to false, a new userns is created for the pod. Setting false is useful for
+ // mitigating container breakout vulnerabilities even allowing users to run their
+ // containers as root without actually having root privileges on the host.
+ // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostUsers *bool `json:"hostUsers,omitempty"`
}
type UnsatisfiableConstraintAction string
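Note: the vendored PodSpec gains the alpha hostUsers field: absent or true keeps the host user namespace, false requests a fresh one. A small sketch of how a manifest carrying the field decodes, assuming sigs.k8s.io/yaml as the JSON-tag-aware decoder and a stand-in struct instead of the vendored type:

    package main

    import (
    	"fmt"

    	"sigs.k8s.io/yaml"
    )

    type podSpec struct {
    	HostUsers *bool `json:"hostUsers,omitempty"`
    }

    func main() {
    	var withField, withoutField podSpec

    	// Explicit opt-out of the host user namespace.
    	_ = yaml.Unmarshal([]byte("hostUsers: false\n"), &withField)
    	// Field absent: nil pointer, treated as the default (true).
    	_ = yaml.Unmarshal([]byte("{}\n"), &withoutField)

    	fmt.Println(withField.HostUsers != nil && !*withField.HostUsers) // true -> pod gets its own userns
    	fmt.Println(withoutField.HostUsers == nil)                       // true -> host user namespace
    }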
diff --git a/pkg/specgen/generate/validate.go b/pkg/specgen/generate/validate.go
index 3c5d5fb96..e9ebdfce3 100644
--- a/pkg/specgen/generate/validate.go
+++ b/pkg/specgen/generate/validate.go
@@ -82,7 +82,7 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error
}
}
- // CPU Checks
+ // CPU checks
if s.ResourceLimits.CPU != nil {
cpu := s.ResourceLimits.CPU
if cpu.Shares != nil && !sysInfo.CPUShares {
@@ -169,6 +169,7 @@ func verifyContainerResourcesCgroupV2(s *specgen.SpecGenerator) ([]string, error
return warnings, nil
}
+ // Memory checks
if s.ResourceLimits.Memory != nil && s.ResourceLimits.Memory.Swap != nil {
own, err := utils.GetOwnCgroup()
if err != nil {
@@ -198,6 +199,19 @@ func verifyContainerResourcesCgroupV2(s *specgen.SpecGenerator) ([]string, error
s.ResourceLimits.Memory.Swap = nil
}
}
+
+ // CPU checks
+ if s.ResourceLimits.CPU != nil {
+ cpu := s.ResourceLimits.CPU
+ if cpu.RealtimePeriod != nil {
+ warnings = append(warnings, "Realtime period not supported on cgroups V2 systems")
+ cpu.RealtimePeriod = nil
+ }
+ if cpu.RealtimeRuntime != nil {
+ warnings = append(warnings, "Realtime runtime not supported on cgroups V2 systems")
+ cpu.RealtimeRuntime = nil
+ }
+ }
return warnings, nil
}
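Note: the validate.go hunk adds cgroup v2 CPU checks that clear realtime limits and surface them as warnings instead of failing container creation, matching the run_cpu_test.go expectations further down. A minimal sketch of that warn-and-drop pattern; CPUConfig stands in for the spec generator's CPU limits type:

    package main

    import "fmt"

    type CPUConfig struct {
    	RealtimePeriod  *uint64
    	RealtimeRuntime *int64
    }

    // dropRealtimeOnCgroupsV2 clears realtime settings that cgroup v2 cannot
    // honor and returns a warning for each one, as in the hunk above.
    func dropRealtimeOnCgroupsV2(cpu *CPUConfig) []string {
    	var warnings []string
    	if cpu == nil {
    		return warnings
    	}
    	if cpu.RealtimePeriod != nil {
    		warnings = append(warnings, "Realtime period not supported on cgroups V2 systems")
    		cpu.RealtimePeriod = nil
    	}
    	if cpu.RealtimeRuntime != nil {
    		warnings = append(warnings, "Realtime runtime not supported on cgroups V2 systems")
    		cpu.RealtimeRuntime = nil
    	}
    	return warnings
    }

    func main() {
    	period := uint64(5000)
    	cpu := &CPUConfig{RealtimePeriod: &period}
    	for _, w := range dropRealtimeOnCgroupsV2(cpu) {
    		fmt.Println("warning:", w)
    	}
    }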
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index a4cbf64e0..d8308aeea 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -3,6 +3,7 @@ package integration
import (
"io/ioutil"
"os"
+ "os/user"
"path/filepath"
"strconv"
"strings"
@@ -73,6 +74,8 @@ var _ = Describe("Podman generate kube", func() {
Expect(pod).To(HaveField("Name", "top-pod"))
enableServiceLinks := false
Expect(pod.Spec).To(HaveField("EnableServiceLinks", &enableServiceLinks))
+ automountServiceAccountToken := false
+ Expect(pod.Spec).To(HaveField("AutomountServiceAccountToken", &automountServiceAccountToken))
numContainers := 0
for range pod.Spec.Containers {
@@ -169,6 +172,8 @@ var _ = Describe("Podman generate kube", func() {
Expect(pod.Spec).To(HaveField("HostNetwork", false))
enableServiceLinks := false
Expect(pod.Spec).To(HaveField("EnableServiceLinks", &enableServiceLinks))
+ automountServiceAccountToken := false
+ Expect(pod.Spec).To(HaveField("AutomountServiceAccountToken", &automountServiceAccountToken))
numContainers := 0
for range pod.Spec.Containers {
@@ -266,6 +271,39 @@ var _ = Describe("Podman generate kube", func() {
Expect(numContainers).To(Equal(1))
})
+ It("podman generate kube on pod with user namespace", func() {
+ u, err := user.Current()
+ Expect(err).To(BeNil())
+ name := u.Name
+ if name == "root" {
+ name = "containers"
+ }
+ content, err := ioutil.ReadFile("/etc/subuid")
+ if err != nil {
+ Skip("cannot read /etc/subuid")
+ }
+ if !strings.Contains(string(content), name) {
+ Skip("cannot find mappings for the current user")
+ }
+ podSession := podmanTest.Podman([]string{"pod", "create", "--name", "testPod", "--userns=auto"})
+ podSession.WaitWithDefaultTimeout()
+ Expect(podSession).Should(Exit(0))
+
+ session := podmanTest.Podman([]string{"create", "--name", "topcontainer", "--pod", "testPod", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "testPod"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube).Should(Exit(0))
+
+ pod := new(v1.Pod)
+ err = yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+ expected := false
+ Expect(pod.Spec).To(HaveField("HostUsers", &expected))
+ })
+
It("podman generate kube on pod with host network", func() {
podSession := podmanTest.Podman([]string{"pod", "create", "--name", "testHostNetwork", "--network", "host"})
podSession.WaitWithDefaultTimeout()
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index d1eb960cd..baa74cb51 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -380,6 +380,9 @@ spec:
restartPolicy: {{ .RestartPolicy }}
hostname: {{ .Hostname }}
hostNetwork: {{ .HostNetwork }}
+{{ if .HostUsers }}
+ hostUsers: {{ .HostUsers }}
+{{ end }}
hostAliases:
{{ range .HostAliases }}
- hostnames:
@@ -844,6 +847,7 @@ type Pod struct {
RestartPolicy string
Hostname string
HostNetwork bool
+ HostUsers *bool
HostAliases []HostAlias
Ctrs []*Ctr
InitCtrs []*Ctr
@@ -968,6 +972,12 @@ func withHostNetwork() podOption {
}
}
+func withHostUsers(val bool) podOption {
+ return func(pod *Pod) {
+ pod.HostUsers = &val
+ }
+}
+
// Deployment describes the options a kube yaml can be configured at deployment level
type Deployment struct {
Name string
@@ -3783,8 +3793,7 @@ ENV OPENJ9_JAVA_OPTIONS=%q
Expect((inspect.InspectContainerToJSON()[0]).HostConfig.LogConfig.Tag).To(Equal("{{.ImageName}}"))
})
- // Check that --userns=auto creates a user namespace
- It("podman play kube --userns=auto", func() {
+ It("podman play kube using a user namespace", func() {
u, err := user.Current()
Expect(err).To(BeNil())
name := u.Name
@@ -3831,6 +3840,26 @@ ENV OPENJ9_JAVA_OPTIONS=%q
usernsInCtr.WaitWithDefaultTimeout()
Expect(usernsInCtr).Should(Exit(0))
Expect(string(usernsInCtr.Out.Contents())).To(Not(Equal(string(initialUsernsConfig))))
+
+ // Now try with hostUsers in the pod spec
+ for _, hostUsers := range []bool{true, false} {
+ pod = getPod(withHostUsers(hostUsers))
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube = podmanTest.PodmanNoCache([]string{"play", "kube", "--replace", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube).Should(Exit(0))
+
+ usernsInCtr = podmanTest.Podman([]string{"exec", getCtrNameInPod(pod), "cat", "/proc/self/uid_map"})
+ usernsInCtr.WaitWithDefaultTimeout()
+ Expect(usernsInCtr).Should(Exit(0))
+ if hostUsers {
+ Expect(string(usernsInCtr.Out.Contents())).To(Equal(string(initialUsernsConfig)))
+ } else {
+ Expect(string(usernsInCtr.Out.Contents())).To(Not(Equal(string(initialUsernsConfig))))
+ }
+ }
})
// Check the block devices are exposed inside container
diff --git a/test/e2e/run_cpu_test.go b/test/e2e/run_cpu_test.go
index e57eb3b26..19bb735ff 100644
--- a/test/e2e/run_cpu_test.go
+++ b/test/e2e/run_cpu_test.go
@@ -138,4 +138,20 @@ var _ = Describe("Podman run cpu", func() {
result.WaitWithDefaultTimeout()
Expect(result).To(ExitWithError())
})
+
+ It("podman run invalid cpu-rt-period with cgroupsv2", func() {
+ SkipIfCgroupV1("testing options that only work in cgroup v2")
+ result := podmanTest.Podman([]string{"run", "--rm", "--cpu-rt-period=5000", ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.ErrorToString()).To(ContainSubstring("Realtime period not supported on cgroups V2 systems"))
+ })
+
+ It("podman run invalid cpu-rt-runtime with cgroupsv2", func() {
+ SkipIfCgroupV1("testing options that only work in cgroup v2")
+ result := podmanTest.Podman([]string{"run", "--rm", "--cpu-rt-runtime=5000", ALPINE, "ls"})
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.ErrorToString()).To(ContainSubstring("Realtime runtime not supported on cgroups V2 systems"))
+ })
})
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index 1148aaae7..e3302bec3 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -29,6 +29,12 @@ function setup() {
local built=$(expr "$output" : ".*Built: \+\(.*\)" | head -n1)
local built_t=$(date --date="$built" +%s)
assert "$built_t" -gt 1546300800 "Preposterous 'Built' time in podman version"
+
+ run_podman -v
+ is "$output" "podman.*version \+" "'Version line' in output"
+
+ run_podman --config foobar version
+ is "$output" ".*The --config flag is ignored by Podman. Exists for Docker compatibility\+" "verify warning for --config option"
}
@test "podman info" {