author     Eduardo Vega <edvegavalerio@gmail.com>   2021-05-01 09:20:56 -0600
committer  Eduardo Vega <edvegavalerio@gmail.com>   2021-05-06 21:21:43 -0600
commit     72f4f389f0a77d226e36413cb54c3867ae25700d (patch)
tree       fe3dd4944ed43cefffb9b22f0d6c7b64212b4fa6
parent     034470e5be8cfeef8ce0e0d2f47587a660682219 (diff)
download   podman-72f4f389f0a77d226e36413cb54c3867ae25700d.tar.gz
           podman-72f4f389f0a77d226e36413cb54c3867ae25700d.tar.bz2
           podman-72f4f389f0a77d226e36413cb54c3867ae25700d.zip
Adds support to preserve auto-update labels in generate and play kube

In the case of generate kube, the auto-update labels are converted into kube annotations; for play kube, they are converted back into labels, since labels are what Podman understands.

Signed-off-by: Eduardo Vega <edvegavalerio@gmail.com>
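For illustration, a minimal, self-contained sketch of the generate-kube direction of that conversion. It mirrors the getAutoUpdateAnnotations helper added to libpod/kube.go below; the container name "top" and the label values are made-up examples.

package main

import (
	"fmt"
	"strings"
)

// toKubeAnnotations mirrors the idea behind getAutoUpdateAnnotations in
// libpod/kube.go: each auto-update label on a container is suffixed with the
// container name so that labels from different containers in the same pod do
// not collide once they become pod-level annotations.
func toKubeAnnotations(ctrName string, ctrLabels map[string]string) map[string]string {
	const autoUpdateLabel = "io.containers.autoupdate"
	annotations := make(map[string]string)
	for k, v := range ctrLabels {
		if strings.Contains(k, autoUpdateLabel) {
			annotations[fmt.Sprintf("%s/%s", k, ctrName)] = v
		}
	}
	return annotations
}

func main() {
	// Hypothetical container named "top" with labels a user would set via
	// --label on podman run.
	labels := map[string]string{
		"io.containers.autoupdate":          "registry",
		"io.containers.autoupdate.authfile": "/some/authfile.json",
	}
	for k, v := range toKubeAnnotations("top", labels) {
		fmt.Printf("%s: %s\n", k, v)
	}
	// Prints (order may vary):
	//   io.containers.autoupdate/top: registry
	//   io.containers.autoupdate.authfile/top: /some/authfile.json
}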
-rw-r--r--   docs/source/markdown/podman-auto-update.1.md   |  2
-rw-r--r--   libpod/kube.go                                 | 49
-rw-r--r--   pkg/domain/infra/abi/play.go                   | 27
-rw-r--r--   pkg/specgen/generate/kube/kube.go              | 15
-rw-r--r--   test/e2e/generate_kube_test.go                 | 50
-rw-r--r--   test/e2e/play_kube_test.go                     | 81
6 files changed, 217 insertions(+), 7 deletions(-)
diff --git a/docs/source/markdown/podman-auto-update.1.md b/docs/source/markdown/podman-auto-update.1.md
index f298d6bf6..12e8bc70f 100644
--- a/docs/source/markdown/podman-auto-update.1.md
+++ b/docs/source/markdown/podman-auto-update.1.md
@@ -14,7 +14,7 @@ The label "image" is an alternative to "registry" maintained for backwards compa
An image is considered updated if the digest in the local storage is different than the one of the remote image.
If an image must be updated, Podman pulls it down and restarts the systemd unit executing the container.
-The registry policy requires a requires a fully-qualified image reference (e.g., quay.io/podman/stable:latest) to be used to create the container.
+The registry policy requires a fully-qualified image reference (e.g., quay.io/podman/stable:latest) to be used to create the container.
This enforcement is necessary to know which image to actually check and pull.
If an image ID was used, Podman would not know which image to check/pull anymore.
diff --git a/libpod/kube.go b/libpod/kube.go
index adcfe92c9..a3f49bfe8 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -218,9 +218,15 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
podContainers := make([]v1.Container, 0, len(containers))
+ podAnnotations := make(map[string]string)
dnsInfo := v1.PodDNSConfig{}
for _, ctr := range containers {
if !ctr.IsInfra() {
+ // Convert auto-update labels into kube annotations
+ for k, v := range getAutoUpdateAnnotations(removeUnderscores(ctr.Name()), ctr.Labels()) {
+ podAnnotations[k] = v
+ }
+
ctr, volumes, _, err := containerToV1Container(ctr)
if err != nil {
return nil, err
@@ -267,10 +273,16 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
podVolumes = append(podVolumes, *vol)
}
- return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name(), &dnsInfo, hostNetwork), nil
+ return newPodObject(
+ p.Name(),
+ podAnnotations,
+ podContainers,
+ podVolumes,
+ &dnsInfo,
+ hostNetwork), nil
}
-func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string, dnsOptions *v1.PodDNSConfig, hostNetwork bool) *v1.Pod {
+func newPodObject(podName string, annotations map[string]string, containers []v1.Container, volumes []v1.Volume, dnsOptions *v1.PodDNSConfig, hostNetwork bool) *v1.Pod {
tm := v12.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -287,6 +299,7 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.
// will reflect time this is run (not container create time) because the conversion
// of the container create time to v1 Time is probably not warranted nor worthwhile.
CreationTimestamp: v12.Now(),
+ Annotations: annotations,
}
ps := v1.PodSpec{
Containers: containers,
@@ -311,7 +324,13 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
kubeVolumes := make([]v1.Volume, 0)
hostNetwork := true
podDNS := v1.PodDNSConfig{}
+ kubeAnnotations := make(map[string]string)
for _, ctr := range ctrs {
+ // Convert auto-update labels into kube annotations
+ for k, v := range getAutoUpdateAnnotations(removeUnderscores(ctr.Name()), ctr.Labels()) {
+ kubeAnnotations[k] = v
+ }
+
if !ctr.HostNetwork() {
hostNetwork = false
}
@@ -355,7 +374,13 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
}
} // end if ctrDNS
}
- return addContainersAndVolumesToPodObject(kubeCtrs, kubeVolumes, strings.ReplaceAll(ctrs[0].Name(), "_", ""), &podDNS, hostNetwork), nil
+ return newPodObject(
+ strings.ReplaceAll(ctrs[0].Name(), "_", ""),
+ kubeAnnotations,
+ kubeCtrs,
+ kubeVolumes,
+ &podDNS,
+ hostNetwork), nil
}
// containerToV1Container converts information we know about a libpod container
@@ -792,3 +817,21 @@ func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) []v1.V
func removeUnderscores(s string) string {
return strings.Replace(s, "_", "", -1)
}
+
+// getAutoUpdateAnnotations searches for auto-update container labels
+// and returns them as kube annotations
+func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[string]string {
+ autoUpdateLabel := "io.containers.autoupdate"
+ annotations := make(map[string]string)
+
+ for k, v := range ctrLabels {
+ if strings.Contains(k, autoUpdateLabel) {
+ // Since labels can vary between containers within a pod, they are
+ // qualified with the container name when converted into kube annotations
+ kc := fmt.Sprintf("%s/%s", k, ctrName)
+ annotations[kc] = v
+ }
+ }
+
+ return annotations
+}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 64e7f208c..a94c5f5c5 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/define"
+ "github.com/containers/podman/v3/pkg/autoupdate"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/specgen"
"github.com/containers/podman/v3/pkg/specgen/generate"
@@ -73,7 +74,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
podTemplateSpec.ObjectMeta = podYAML.ObjectMeta
podTemplateSpec.Spec = podYAML.Spec
- r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex)
+ r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations)
if err != nil {
return nil, err
}
@@ -143,7 +144,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
// create "replicas" number of pods
for i = 0; i < numReplicas; i++ {
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
- podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex)
+ podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations)
if err != nil {
return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
}
@@ -152,7 +153,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, nil
}
-func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string) (*entities.PlayKubeReport, error) {
var (
writer io.Writer
playKubePod entities.PlayKubePod
@@ -265,6 +266,9 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
for _, container := range podYAML.Spec.Containers {
+ // Contains all labels obtained from kube
+ labels := make(map[string]string)
+
// NOTE: set the pull policy to "newer". This will cover cases
// where the "latest" tag requires a pull and will also
// transparently handle "localhost/" prefixed files which *may*
@@ -292,6 +296,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
return nil, err
}
+ // Handle kube annotations
+ for k, v := range annotations {
+ switch k {
+ // An auto-update annotation without a container name applies to
+ // all containers within the pod
+ case autoupdate.Label, autoupdate.AuthfileLabel:
+ labels[k] = v
+ // An auto-update annotation with a container name applies only
+ // to the specified container
+ case fmt.Sprintf("%s/%s", autoupdate.Label, container.Name),
+ fmt.Sprintf("%s/%s", autoupdate.AuthfileLabel, container.Name):
+ prefixAndCtr := strings.Split(k, "/")
+ labels[prefixAndCtr[0]] = v
+ }
+ }
+
specgenOpts := kube.CtrSpecGenOptions{
Container: container,
Image: pulledImages[0],
@@ -305,6 +325,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
NetNSIsHost: p.NetNS.IsHost(),
SecretsManager: secretsManager,
LogDriver: options.LogDriver,
+ Labels: labels,
}
specGen, err := kube.ToSpecGen(ctx, &specgenOpts)
if err != nil {
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 73c1c31ba..ccce3edba 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -100,6 +100,8 @@ type CtrSpecGenOptions struct {
SecretsManager *secrets.SecretsManager
// LogDriver which should be used for the container
LogDriver string
+ // Labels define key-value pairs of metadata
+ Labels map[string]string
}
func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) {
@@ -278,6 +280,19 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
s.NetNS.NSMode = specgen.Host
}
+ // Add labels that come from kube
+ if len(s.Labels) == 0 {
+ // If there are no labels, let's use the map that comes
+ // from kube
+ s.Labels = opts.Labels
+ } else {
+ // If there are already labels in the map, append the ones
+ // obtained from kube
+ for k, v := range opts.Labels {
+ s.Labels[k] = v
+ }
+ }
+
return s, nil
}
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 611e8ddac..4c0fc6db0 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -873,4 +873,54 @@ USER test1`
}
}
})
+
+ It("podman generate kube on container with auto update labels", func() {
+ top := podmanTest.Podman([]string{"run", "-dt", "--name", "top", "--label", "io.containers.autoupdate=local", ALPINE, "top"})
+ top.WaitWithDefaultTimeout()
+ Expect(top.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "top"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ pod := new(v1.Pod)
+ err := yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+
+ v, ok := pod.GetAnnotations()["io.containers.autoupdate/top"]
+ Expect(ok).To(Equal(true))
+ Expect(v).To(Equal("local"))
+ })
+
+ It("podman generate kube on pod with auto update labels in all containers", func() {
+ pod1 := podmanTest.Podman([]string{"pod", "create", "--name", "pod1"})
+ pod1.WaitWithDefaultTimeout()
+ Expect(pod1.ExitCode()).To(Equal(0))
+
+ top1 := podmanTest.Podman([]string{"run", "-dt", "--name", "top1", "--pod", "pod1", "--label", "io.containers.autoupdate=registry", "--label", "io.containers.autoupdate.authfile=/some/authfile.json", ALPINE, "top"})
+ top1.WaitWithDefaultTimeout()
+ Expect(top1.ExitCode()).To(Equal(0))
+
+ top2 := podmanTest.Podman([]string{"run", "-dt", "--name", "top2", "--pod", "pod1", "--label", "io.containers.autoupdate=registry", "--label", "io.containers.autoupdate.authfile=/some/authfile.json", ALPINE, "top"})
+ top2.WaitWithDefaultTimeout()
+ Expect(top2.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", "pod1"})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ pod := new(v1.Pod)
+ err := yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+
+ for _, ctr := range []string{"top1", "top2"} {
+ v, ok := pod.GetAnnotations()["io.containers.autoupdate/"+ctr]
+ Expect(ok).To(Equal(true))
+ Expect(v).To(Equal("registry"))
+
+ v, ok = pod.GetAnnotations()["io.containers.autoupdate.authfile/"+ctr]
+ Expect(ok).To(Equal(true))
+ Expect(v).To(Equal("/some/authfile.json"))
+ }
+ })
})
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 836fbe1ee..d5861e7ba 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -768,6 +768,12 @@ func getCtr(options ...ctrOption) *Ctr {
type ctrOption func(*Ctr)
+func withName(name string) ctrOption {
+ return func(c *Ctr) {
+ c.Name = name
+ }
+}
+
func withCmd(cmd []string) ctrOption {
return func(c *Ctr) {
c.Cmd = cmd
@@ -2305,4 +2311,79 @@ invalid kube kind
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Not(Equal(0)))
})
+
+ It("podman play kube with auto update annotations for all containers", func() {
+ ctr01Name := "ctr01"
+ ctr02Name := "ctr02"
+ podName := "foo"
+ autoUpdateRegistry := "io.containers.autoupdate"
+ autoUpdateRegistryValue := "registry"
+ autoUpdateAuthfile := "io.containers.autoupdate.authfile"
+ autoUpdateAuthfileValue := "/some/authfile.json"
+
+ ctr01 := getCtr(withName(ctr01Name))
+ ctr02 := getCtr(withName(ctr02Name))
+
+ pod := getPod(
+ withPodName(podName),
+ withCtr(ctr01),
+ withCtr(ctr02),
+ withAnnotation(autoUpdateRegistry, autoUpdateRegistryValue),
+ withAnnotation(autoUpdateAuthfile, autoUpdateAuthfileValue))
+
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ for _, ctr := range []string{podName + "-" + ctr01Name, podName + "-" + ctr02Name} {
+ inspect := podmanTest.Podman([]string{"inspect", ctr, "--format", "'{{.Config.Labels}}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+
+ Expect(inspect.OutputToString()).To(ContainSubstring(autoUpdateRegistry + ":" + autoUpdateRegistryValue))
+ Expect(inspect.OutputToString()).To(ContainSubstring(autoUpdateAuthfile + ":" + autoUpdateAuthfileValue))
+ }
+ })
+
+ It("podman play kube with auto update annotations for first container only", func() {
+ ctr01Name := "ctr01"
+ ctr02Name := "ctr02"
+ autoUpdateRegistry := "io.containers.autoupdate"
+ autoUpdateRegistryValue := "local"
+
+ ctr01 := getCtr(withName(ctr01Name))
+ ctr02 := getCtr(withName(ctr02Name))
+
+ pod := getPod(
+ withCtr(ctr01),
+ withCtr(ctr02),
+ )
+
+ deployment := getDeployment(
+ withPod(pod),
+ withDeploymentAnnotation(autoUpdateRegistry+"/"+ctr01Name, autoUpdateRegistryValue),
+ )
+
+ err = generateKubeYaml("deployment", deployment, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ podName := getPodNamesInDeployment(deployment)[0].Name
+
+ inspect := podmanTest.Podman([]string{"inspect", podName + "-" + ctr01Name, "--format", "'{{.Config.Labels}}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(autoUpdateRegistry + ":" + autoUpdateRegistryValue))
+
+ inspect = podmanTest.Podman([]string{"inspect", podName + "-" + ctr02Name, "--format", "'{{.Config.Labels}}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`map[]`))
+ })
})
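
For the reverse direction, a minimal sketch of how play kube maps pod annotations back onto per-container labels, mirroring the switch added to playKubePod in pkg/domain/infra/abi/play.go above; the container names "ctr01"/"ctr02" and the value "local" are made-up examples.

package main

import (
	"fmt"
	"strings"
)

// annotationsToLabels mirrors the annotation handling added to playKubePod:
// an annotation without a container-name suffix applies to every container,
// while a suffixed annotation ("<label>/<ctr>") applies only to that container.
func annotationsToLabels(ctrName string, annotations map[string]string) map[string]string {
	const (
		autoUpdateLabel    = "io.containers.autoupdate"
		autoUpdateAuthfile = "io.containers.autoupdate.authfile"
	)
	labels := make(map[string]string)
	for k, v := range annotations {
		switch k {
		case autoUpdateLabel, autoUpdateAuthfile:
			labels[k] = v
		case autoUpdateLabel + "/" + ctrName, autoUpdateAuthfile + "/" + ctrName:
			labels[strings.SplitN(k, "/", 2)[0]] = v
		}
	}
	return labels
}

func main() {
	// Hypothetical pod annotations as produced by generate kube for a
	// container named "ctr01"; "ctr02" carries no auto-update annotation.
	annotations := map[string]string{
		"io.containers.autoupdate/ctr01": "local",
	}
	fmt.Println(annotationsToLabels("ctr01", annotations)) // map[io.containers.autoupdate:local]
	fmt.Println(annotationsToLabels("ctr02", annotations)) // map[]
}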