author     Yaron Dayagi <ydayagi@redhat.com>    2022-02-07 12:38:28 +0200
committer  Yaron Dayagi <ydayagi@redhat.com>    2022-02-23 11:00:09 +0200
commit     5cba5cbfdb85d97094905586427d274bf7a24c0f (patch)
tree       feffa6906fb168bddfe9e66294ddcd750252bf50 /pkg/specgen/generate
parent     8b2432422fc188e15130c888a05e41fd881b8ca4 (diff)
play kube: set defaults to container resources
This fixes https://github.com/containers/podman/issues/13115. The change tries to imitate k8s behavior: when limits are not set, the container's limits default to all of the host's CPUs and all of its RAM; when requests are missing, they are set equal to the limits.
Signed-off-by: Yaron Dayagi <ydayagi@redhat.com>
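For illustration, here is a minimal standalone Go sketch of the defaulting rules described above. It is not part of the patch; it assumes the same github.com/docker/docker/pkg/system and k8s.io/apimachinery packages the patch imports, and the printed values depend on the host it runs on.

package main

import (
    "fmt"
    "runtime"

    "github.com/docker/docker/pkg/system"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // A container spec with no resources section, as in the reported issue.
    ctr := v1.Container{Name: "test"}

    // limits.memory: falls back to the host's total memory when unset.
    mem := ctr.Resources.Limits.Memory()
    if mem.IsZero() {
        mi, err := system.ReadMemInfo()
        if err != nil {
            panic(err)
        }
        mem = resource.NewQuantity(mi.MemTotal, resource.DecimalSI)
    }

    // limits.cpu: falls back to the number of host CPUs when unset.
    cpu := ctr.Resources.Limits.Cpu()
    if cpu.IsZero() {
        cpu = resource.NewQuantity(int64(runtime.NumCPU()), resource.DecimalSI)
    }

    // requests.* fall back to the (possibly defaulted) limits when unset.
    fmt.Println("limits/requests memory:", mem.String())
    fmt.Println("limits/requests cpu:   ", cpu.String())
}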
Diffstat (limited to 'pkg/specgen/generate')
-rw-r--r--    pkg/specgen/generate/kube/kube.go         48
-rw-r--r--    pkg/specgen/generate/kube/play_test.go    79
2 files changed, 126 insertions(+), 1 deletion(-)
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 475401016..a8f17e2b6 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -7,6 +7,7 @@ import (
"math"
"net"
"regexp"
+ "runtime"
"strconv"
"strings"
"time"
@@ -22,6 +23,7 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/specgen/generate"
"github.com/containers/podman/v4/pkg/util"
+ "github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -700,8 +702,12 @@ func envVarValueResourceFieldRef(env v1.EnvVar, opts *CtrSpecGenOptions) (*strin
divisor.Set(1)
}
+ resources, err := getContainerResources(opts.Container)
+ if err != nil {
+ return nil, err
+ }
+
var value *resource.Quantity
- resources := opts.Container.Resources
resourceName := env.ValueFrom.ResourceFieldRef.Resource
var isValidDivisor bool
@@ -757,6 +763,46 @@ func isCPUDivisor(divisor resource.Quantity) bool {
}
}
+func getContainerResources(container v1.Container) (v1.ResourceRequirements, error) {
+    result := v1.ResourceRequirements{
+        Limits:   v1.ResourceList{},
+        Requests: v1.ResourceList{},
+    }
+
+    limits := container.Resources.Limits
+    requests := container.Resources.Requests
+
+    if limits == nil || limits.Memory().IsZero() {
+        mi, err := system.ReadMemInfo()
+        if err != nil {
+            return result, err
+        }
+        result.Limits[v1.ResourceMemory] = *resource.NewQuantity(mi.MemTotal, resource.DecimalSI)
+    } else {
+        result.Limits[v1.ResourceMemory] = limits[v1.ResourceMemory]
+    }
+
+    if limits == nil || limits.Cpu().IsZero() {
+        result.Limits[v1.ResourceCPU] = *resource.NewQuantity(int64(runtime.NumCPU()), resource.DecimalSI)
+    } else {
+        result.Limits[v1.ResourceCPU] = limits[v1.ResourceCPU]
+    }
+
+    if requests == nil || requests.Memory().IsZero() {
+        result.Requests[v1.ResourceMemory] = result.Limits[v1.ResourceMemory]
+    } else {
+        result.Requests[v1.ResourceMemory] = requests[v1.ResourceMemory]
+    }
+
+    if requests == nil || requests.Cpu().IsZero() {
+        result.Requests[v1.ResourceCPU] = result.Limits[v1.ResourceCPU]
+    } else {
+        result.Requests[v1.ResourceCPU] = requests[v1.ResourceCPU]
+    }
+
+    return result, nil
+}
+
// getPodPorts converts a slice of kube container descriptions to an
// array of portmapping
func getPodPorts(containers []v1.Container) []types.PortMapping {
diff --git a/pkg/specgen/generate/kube/play_test.go b/pkg/specgen/generate/kube/play_test.go
index 282324310..6798fdb1b 100644
--- a/pkg/specgen/generate/kube/play_test.go
+++ b/pkg/specgen/generate/kube/play_test.go
@@ -6,10 +6,12 @@ import (
"io/ioutil"
"math"
"os"
+ "runtime"
"strconv"
"testing"
"github.com/containers/common/pkg/secrets"
+ "github.com/docker/docker/pkg/system"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -193,6 +195,11 @@ func TestEnvVarValue(t *testing.T) {
assert.NoError(t, err)
defer os.RemoveAll(d)
secretsManager := createSecrets(t, d)
+ stringNumCPUs := strconv.Itoa(runtime.NumCPU())
+
+ mi, err := system.ReadMemInfo()
+ assert.Nil(t, err)
+ stringMemTotal := strconv.FormatInt(mi.MemTotal, 10)
tests := []struct {
name string
@@ -694,6 +701,78 @@ func TestEnvVarValue(t *testing.T) {
true,
strconv.Itoa(int(float64(cpuInt) / 0.001)),
},
+ {
+ "ResourceFieldRefNoLimitMemory",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
+ Resource: "limits.memory",
+ },
+ },
+ },
+ CtrSpecGenOptions{
+ Container: v1.Container{
+ Name: "test",
+ },
+ },
+ true,
+ stringMemTotal,
+ },
+ {
+ "ResourceFieldRefNoRequestMemory",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
+ Resource: "requests.memory",
+ },
+ },
+ },
+ CtrSpecGenOptions{
+ Container: v1.Container{
+ Name: "test",
+ },
+ },
+ true,
+ stringMemTotal,
+ },
+ {
+ "ResourceFieldRefNoLimitCPU",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
+ Resource: "limits.cpu",
+ },
+ },
+ },
+ CtrSpecGenOptions{
+ Container: v1.Container{
+ Name: "test",
+ },
+ },
+ true,
+ stringNumCPUs,
+ },
+ {
+ "ResourceFieldRefNoRequestCPU",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
+ Resource: "requests.cpu",
+ },
+ },
+ },
+ CtrSpecGenOptions{
+ Container: v1.Container{
+ Name: "test",
+ },
+ },
+ true,
+ stringNumCPUs,
+ },
}
for _, test := range tests {