Diffstat (limited to 'pkg/domain/infra/abi')

 -rw-r--r--  pkg/domain/infra/abi/containers.go |  34
 -rw-r--r--  pkg/domain/infra/abi/images.go     |   2
 -rw-r--r--  pkg/domain/infra/abi/play.go       | 125
 -rw-r--r--  pkg/domain/infra/abi/play_test.go  | 254
 -rw-r--r--  pkg/domain/infra/abi/pods.go       |   6

 5 files changed, 400 insertions(+), 21 deletions(-)
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index f6bae5bc0..614fd5fe0 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -23,7 +23,7 @@ import (
"github.com/containers/podman/v2/pkg/checkpoint"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/infra/abi/terminal"
- "github.com/containers/podman/v2/pkg/parallel"
+ parallelctr "github.com/containers/podman/v2/pkg/parallel/ctr"
"github.com/containers/podman/v2/pkg/ps"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/signal"
@@ -157,7 +157,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
return nil, err
}
- errMap, err := parallel.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
+ errMap, err := parallelctr.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
var err error
if options.Timeout != nil {
err = c.StopWithTimeout(*options.Timeout)
@@ -273,16 +273,6 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
reports := []*entities.RmReport{}
- if options.Storage {
- for _, ctr := range namesOrIds {
- report := entities.RmReport{Id: ctr}
- if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
- report.Err = err
- }
- reports = append(reports, &report)
- }
- return reports, nil
- }
names := namesOrIds
for _, cidFile := range options.CIDFiles {
@@ -294,6 +284,22 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = append(names, id)
}
+ // Attempt to remove named containers directly from storage. If a container
+ // is defined in libpod, this fails and we fall through to removing the
+ // container from libpod below.
+ tmpNames := []string{}
+ for _, ctr := range names {
+ report := entities.RmReport{Id: ctr}
+ if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
+ // storage removal failed (e.g. the container is known to libpod),
+ // so keep the name for the libpod removal below
+ tmpNames = append(tmpNames, ctr)
+ } else {
+ reports = append(reports, &report)
+ }
+ }
+ if len(tmpNames) < len(names) {
+ names = tmpNames
+ }
+
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
// Failed to get containers. If force is specified, get the containers ID
@@ -302,7 +308,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return nil, err
}
- for _, ctr := range namesOrIds {
+ for _, ctr := range names {
logrus.Debugf("Evicting container %q", ctr)
report := entities.RmReport{Id: ctr}
id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
@@ -321,7 +327,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return reports, nil
}
- errMap, err := parallel.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
+ errMap, err := parallelctr.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
err := ic.Libpod.RemoveContainer(ctx, c, options.Force, options.Volumes)
if err != nil {
if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
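
Aside: the net effect of the ContainerRm change is that `podman rm` now tries each name as a raw storage container first (covering containers created by tools such as buildah), and only the names that fail storage removal continue to the libpod removal path, replacing the old opt-in --storage branch. A minimal standalone sketch of that partitioning (the helper and container names are hypothetical, not podman API):

    package main

    import "fmt"

    // partition mirrors the new ContainerRm flow: attempt storage removal
    // first and keep only the names that must be handled by libpod.
    func partition(names []string, removeFromStorage func(string) error) (removed, remaining []string) {
        for _, name := range names {
            if err := removeFromStorage(name); err != nil {
                remaining = append(remaining, name)
                continue
            }
            removed = append(removed, name)
        }
        return removed, remaining
    }

    func main() {
        ownedByLibpod := map[string]bool{"podman-ctr": true}
        removed, remaining := partition([]string{"buildah-ctr", "podman-ctr"}, func(name string) error {
            if ownedByLibpod[name] {
                return fmt.Errorf("%s is owned by libpod", name)
            }
            return nil // deleted straight from storage
        })
        fmt.Println(removed, remaining) // [buildah-ctr] [podman-ctr]
    }
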
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 3bb7de83c..f9d733c63 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -511,6 +511,7 @@ func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.Im
Limit: opts.Limit,
NoTrunc: opts.NoTrunc,
InsecureSkipTLSVerify: opts.SkipTLSVerify,
+ ListTags: opts.ListTags,
}
searchResults, err := image.SearchImages(term, searchOpts)
@@ -529,6 +530,7 @@ func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.Im
reports[i].Stars = searchResults[i].Stars
reports[i].Official = searchResults[i].Official
reports[i].Automated = searchResults[i].Automated
+ reports[i].Tag = searchResults[i].Tag
}
return reports, nil
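
Aside: with ListTags plumbed through, a caller can ask a registry for a term's tags rather than repository descriptions. A hedged sketch of such a caller (ListTags, Limit, and Tag appear in this diff; the ImageEngine wiring and the Name field are assumptions):

    import (
        "context"
        "fmt"

        "github.com/containers/podman/v2/pkg/domain/entities"
    )

    // listTags prints name:tag pairs for a search term.
    func listTags(ctx context.Context, ir entities.ImageEngine, term string) error {
        reports, err := ir.Search(ctx, term, entities.ImageSearchOptions{
            ListTags: true,
            Limit:    10,
        })
        if err != nil {
            return err
        }
        for _, r := range reports {
            fmt.Printf("%s:%s\n", r.Name, r.Tag)
        }
        return nil
    }
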
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 40edc1ae3..a7c66bae6 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -28,6 +28,7 @@ import (
"github.com/sirupsen/logrus"
v1apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
)
const (
@@ -35,6 +36,8 @@ const (
kubeDirectoryPermission = 0755
// https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
kubeFilePermission = 0644
+ // Kubernetes sets CPUPeriod to 100000us (100ms): https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/
+ defaultCPUPeriod = 100000
)
func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
@@ -311,6 +314,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
ctrRestartPolicy = libpod.RestartPolicyAlways
}
+ configMaps := []v1.ConfigMap{}
+ for _, p := range options.ConfigMaps {
+ f, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+
+ cm, err := readConfigMapFromFile(f)
+ // Close immediately instead of deferring: this runs in a loop, so a
+ // deferred Close would hold every ConfigMap file open until the
+ // function returns.
+ f.Close()
+ if err != nil {
+ return nil, errors.Wrapf(err, "%q", p)
+ }
+
+ configMaps = append(configMaps, cm)
+ }
+
containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
for _, container := range podYAML.Spec.Containers {
pullPolicy := util.PullImageMissing
@@ -334,7 +353,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths)
if err != nil {
return nil, err
}
@@ -447,7 +466,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -490,6 +509,27 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
// but apply to the containers with the prefixed name
securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerYAML.Name)
+ var err error
+ // Use MilliValue so both fractional and whole limits convert correctly:
+ // "500m" -> 500, "2" -> 2000. (quantityToInt64 would return 2 for "2",
+ // silently under-allocating whole-CPU limits.)
+ milliCPU := containerYAML.Resources.Limits.Cpu().MilliValue()
+ if milliCPU > 0 {
+ containerConfig.Resources.CPUPeriod = defaultCPUPeriod
+ // CPU quota is a fraction of the period: milliCPU / 1000.0 * period
+ // Or, without floating point math:
+ containerConfig.Resources.CPUQuota = milliCPU * defaultCPUPeriod / 1000
+ }
+
+ containerConfig.Resources.Memory, err = quantityToInt64(containerYAML.Resources.Limits.Memory())
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to set memory limit")
+ }
+ containerConfig.Resources.MemoryReservation, err = quantityToInt64(containerYAML.Resources.Requests.Memory())
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to set memory reservation")
+ }
+
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
containerConfig.Command = imageData.Config.Entrypoint
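
Aside: a worked example of the quota arithmetic above. With the period pinned at the 100ms kubelet default, quota = milliCPU * period / 1000, so half a CPU gets half the period and two CPUs get twice the period. Runnable sketch (only defaultCPUPeriod is taken from the diff):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    const defaultCPUPeriod = 100000 // microseconds, the kubelet default

    func main() {
        for _, s := range []string{"500m", "2"} {
            q := resource.MustParse(s)
            milliCPU := q.MilliValue() // "500m" -> 500, "2" -> 2000
            quota := milliCPU * defaultCPUPeriod / 1000
            fmt.Printf("cpu=%s -> CPUQuota=%dus of a %dus period\n", s, quota, defaultCPUPeriod)
        }
        // cpu=500m -> CPUQuota=50000us of a 100000us period (half a CPU)
        // cpu=2 -> CPUQuota=200000us of a 100000us period (two CPUs)
    }
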
@@ -572,8 +612,17 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
}
envs = imageEnv
}
- for _, e := range containerYAML.Env {
- envs[e.Name] = e.Value
+ // Fold in envFrom sources first so that explicit env entries take
+ // precedence on duplicate keys, as they do in Kubernetes.
+ for _, envFrom := range containerYAML.EnvFrom {
+ cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
+
+ for k, v := range cmEnvs {
+ envs[k] = v
+ }
+ }
+ for _, env := range containerYAML.Env {
+ envs[env.Name] = envVarValue(env, configMaps)
+ }
containerConfig.Env = envs
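
Aside: to make the merge order concrete, envFrom values are laid down first and explicit env entries overwrite them, so a literal value wins on a duplicate key. A self-contained sketch with stand-in data (the two loops are inlined versions of the hunk above; all names are illustrative):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        cm := v1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{Name: "settings"},
            Data:       map[string]string{"LOG_LEVEL": "info", "PORT": "8080"},
        }

        envs := map[string]string{}
        for k, v := range cm.Data { // stands in for the envFrom loop
            envs[k] = v
        }
        for _, e := range []v1.EnvVar{{Name: "LOG_LEVEL", Value: "debug"}} {
            envs[e.Name] = e.Value // explicit env entry overrides the ConfigMap
        }

        fmt.Println(envs["LOG_LEVEL"], envs["PORT"]) // debug 8080
    }
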
@@ -594,6 +643,62 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
return &containerConfig, nil
}
+// readConfigMapFromFile returns a Kubernetes ConfigMap parsed from the given
+// reader (one of the files passed in via the --configmap flag)
+func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
+ var cm v1.ConfigMap
+
+ content, err := ioutil.ReadAll(r)
+ if err != nil {
+ return cm, errors.Wrapf(err, "unable to read ConfigMap YAML content")
+ }
+
+ if err := yaml.Unmarshal(content, &cm); err != nil {
+ return cm, errors.Wrapf(err, "unable to read YAML as Kube ConfigMap")
+ }
+
+ if cm.Kind != "ConfigMap" {
+ return cm, errors.Errorf("invalid YAML kind: %q. [ConfigMap] is the only kind supported by --configmap", cm.Kind)
+ }
+
+ return cm, nil
+}
+
+// envVarsFromConfigMap returns all key-value pairs as env vars from a configMap that matches the envFrom setting of a container
+func envVarsFromConfigMap(envFrom v1.EnvFromSource, configMaps []v1.ConfigMap) map[string]string {
+ envs := map[string]string{}
+
+ if envFrom.ConfigMapRef != nil {
+ cmName := envFrom.ConfigMapRef.Name
+
+ for _, c := range configMaps {
+ if cmName == c.Name {
+ envs = c.Data
+ break
+ }
+ }
+ }
+
+ return envs
+}
+
+// envVarValue returns the environment variable value configured within the container's env setting.
+// It gets the value from a configMap if specified, otherwise it returns env.Value
+func envVarValue(env v1.EnvVar, configMaps []v1.ConfigMap) string {
+ if env.ValueFrom == nil || env.ValueFrom.ConfigMapKeyRef == nil {
+ return env.Value
+ }
+
+ keyRef := env.ValueFrom.ConfigMapKeyRef
+ for _, c := range configMaps {
+ if keyRef.Name == c.Name {
+ if value, ok := c.Data[keyRef.Key]; ok {
+ return value
+ }
+ }
+ }
+
+ return env.Value
+}
+
// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
// it holds both container and pod seccomp paths
type kubeSeccompPaths struct {
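
Aside: end to end, the three helpers above turn a --configmap file into container environment values. An in-package sketch (it calls the unexported helpers, so it would have to sit next to play.go, and it assumes "strings" is imported; the ConfigMap content is illustrative):

    func exampleConfigMapResolution() (string, error) {
        doc := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: settings\ndata:\n  LOG_LEVEL: info\n"
        cm, err := readConfigMapFromFile(strings.NewReader(doc))
        if err != nil {
            return "", err
        }

        env := v1.EnvVar{
            Name: "LOG_LEVEL",
            ValueFrom: &v1.EnvVarSource{
                ConfigMapKeyRef: &v1.ConfigMapKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{Name: "settings"},
                    Key:                  "LOG_LEVEL",
                },
            },
        }
        return envVarValue(env, []v1.ConfigMap{cm}), nil // "info"
    }
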
@@ -667,3 +772,15 @@ func verifySeccompPath(path string, profileRoot string) (string, error) {
return "", errors.Errorf("invalid seccomp path: %s", path)
}
}
+
+func quantityToInt64(quantity *resource.Quantity) (int64, error) {
+ if i, ok := quantity.AsInt64(); ok {
+ return i, nil
+ }
+
+ if i, ok := quantity.AsDec().Unscaled(); ok {
+ return i, nil
+ }
+
+ return 0, errors.Errorf("Quantity cannot be represented as an int64: %v", quantity)
+}
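
Aside: quantityToInt64 takes one of two routes. AsInt64 succeeds for integral quantities, while suffixed fractions such as "100m" fail it and fall back to the unscaled decimal value ("100m" is stored as unscaled 100 with scale 3). A runnable illustration:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        mem := resource.MustParse("512Mi")
        if v, ok := mem.AsInt64(); ok {
            fmt.Println(v) // 536870912 (bytes)
        }

        cpu := resource.MustParse("100m")
        if _, ok := cpu.AsInt64(); !ok {
            if v, ok := cpu.AsDec().Unscaled(); ok {
                fmt.Println(v) // 100, i.e. the milli value
            }
        }
    }
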
diff --git a/pkg/domain/infra/abi/play_test.go b/pkg/domain/infra/abi/play_test.go
new file mode 100644
index 000000000..5595476c3
--- /dev/null
+++ b/pkg/domain/infra/abi/play_test.go
@@ -0,0 +1,254 @@
+package abi
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var configMapList = []v1.ConfigMap{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bar",
+ },
+ Data: map[string]string{
+ "myvar": "bar",
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+}
+
+func TestReadConfigMapFromFile(t *testing.T) {
+ tests := []struct {
+ name string
+ configMapContent string
+ expectError bool
+ expectedErrorMsg string
+ expected v1.ConfigMap
+ }{
+ {
+ "ValidConfigMap",
+ `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ false,
+ "",
+ v1.ConfigMap{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+ },
+ {
+ "InvalidYAML",
+ `
+Invalid YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "unable to read YAML as Kube ConfigMap",
+ v1.ConfigMap{},
+ },
+ {
+ "InvalidKind",
+ `
+apiVersion: v1
+kind: InvalidKind
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "invalid YAML kind",
+ v1.ConfigMap{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ buf := bytes.NewBufferString(test.configMapContent)
+ cm, err := readConfigMapFromFile(buf)
+
+ if test.expectError {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), test.expectedErrorMsg)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.expected, cm)
+ }
+ })
+ }
+}
+
+func TestEnvVarsFromConfigMap(t *testing.T) {
+ tests := []struct {
+ name string
+ envFrom v1.EnvFromSource
+ configMapList []v1.ConfigMap
+ expected map[string]string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{
+ "myvar": "foo",
+ },
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{},
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ map[string]string{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarsFromConfigMap(test.envFrom, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
+
+func TestEnvVarValue(t *testing.T) {
+ tests := []struct {
+ name string
+ envVar v1.EnvVar
+ configMapList []v1.ConfigMap
+ expected string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "foo",
+ },
+ {
+ "ContainerKeyDoesNotExistInConfigMap",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ "",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarValue(test.envVar, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index 747da9fd4..258640a81 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -66,7 +66,7 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
for _, p := range pods {
report := entities.PodKillReport{Id: p.ID()}
- conErrs, err := p.Kill(uint(sig))
+ conErrs, err := p.Kill(ctx, uint(sig))
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
report.Errs = []error{err}
reports = append(reports, &report)
@@ -92,7 +92,7 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op
}
for _, p := range pods {
report := entities.PodPauseReport{Id: p.ID()}
- errs, err := p.Pause()
+ errs, err := p.Pause(ctx)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
report.Errs = []error{err}
continue
@@ -117,7 +117,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
}
for _, p := range pods {
report := entities.PodUnpauseReport{Id: p.ID()}
- errs, err := p.Unpause()
+ errs, err := p.Unpause(ctx)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
report.Errs = []error{err}
continue
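
Aside: the pods.go hunks only thread the context through Kill, Pause, and Unpause, whose per-container work now runs through the parallel executor introduced in containers.go and so can observe cancellation. A hedged caller sketch (the libpod method shapes are inferred from this diff):

    import (
        "context"

        "github.com/containers/podman/v2/libpod"
        "github.com/sirupsen/logrus"
    )

    // pauseAll pauses each pod, passing the caller's context down so the
    // underlying parallel container operations can be cancelled.
    func pauseAll(ctx context.Context, pods []*libpod.Pod) error {
        for _, p := range pods {
            conErrs, err := p.Pause(ctx)
            if err != nil {
                return err
            }
            for id, cerr := range conErrs {
                logrus.Warnf("container %s failed to pause: %v", id, cerr)
            }
        }
        return nil
    }
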