author     Valentin Rothberg <vrothberg@redhat.com>   2022-05-05 13:34:01 +0200
committer  Valentin Rothberg <vrothberg@redhat.com>   2022-05-12 10:51:13 +0200
commit     840c120c21124de921a7f57435cf0d0497103736 (patch)
tree       18b6d18b88ff178474487bd59e0d4275c1b27ea2 /pkg
parent     ecf0177a01535b273a62e12577d7caf062a91117 (diff)
play kube: service container
Add the notion of a "service container" to play kube. A service container is started before the pods in play kube and is (reverse) linked to them. The service container is stopped/removed *after* all pods it is associated with are stopped/removed.

In other words, a service container tracks the entire life cycle of a service started via `podman play kube`. This is required to enable `play kube` in a systemd unit file.

The service container is only used when the `--service-container` flag is set on the CLI. This flag has been marked as hidden as it is not meant to be used outside the context of `play kube`. It is further not supported on the remote client.

The wiring with systemd will be done in a later commit.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
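For orientation, a minimal sketch (not part of this commit) of how the new option is consumed through the entities API; it assumes an already-initialized local engine and elides all runtime setup:

// playWithServiceContainer is an illustrative helper, not part of this
// commit: it runs the given K8s YAML through the local engine with the
// hidden service-container behavior enabled. `ic` is assumed to be an
// already-initialized *abi.ContainerEngine backed by a libpod runtime.
package play // hypothetical package, for illustration only

import (
	"context"
	"io"

	"github.com/containers/podman/v4/pkg/domain/entities"
	"github.com/containers/podman/v4/pkg/domain/infra/abi"
)

func playWithServiceContainer(ctx context.Context, ic *abi.ContainerEngine, yaml io.Reader) (*entities.PlayKubeReport, error) {
	opts := entities.PlayKubeOptions{
		// Create a service container that is started before the pods
		// and stopped/removed after them (local client only).
		ServiceContainer: true,
	}
	return ic.PlayKube(ctx, yaml, opts)
}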
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/domain/entities/play.go          |   2
-rw-r--r--  pkg/domain/infra/abi/play.go         |  82
-rw-r--r--  pkg/specgen/generate/pause_image.go  |  89
-rw-r--r--  pkg/specgen/generate/pod_create.go   | 104
-rw-r--r--  pkg/specgen/podspecgen.go            |   3
5 files changed, 182 insertions, 98 deletions
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index bf7c33f2b..f1ba21650 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -54,6 +54,8 @@ type PlayKubeOptions struct {
LogOptions []string
// Start - don't start the pod if false
Start types.OptionalBool
+ // ServiceContainer - creates a service container that is started before and is stopped after all pods.
+ ServiceContainer bool
// Userns - define the user namespace to use.
Userns string
}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 019361694..420d51483 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -28,12 +28,54 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/ghodss/yaml"
+ "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yamlv2 "gopkg.in/yaml.v2"
)
-func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+// createServiceContainer creates a container that can later on
+// be associated with the pods of a K8s yaml. It will be started along with
+// the first pod.
+func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name string) (*libpod.Container, error) {
+ // Similar to infra containers, a service container is using the pause image.
+ image, err := generate.PullOrBuildInfraImage(ic.Libpod, "")
+ if err != nil {
+ return nil, fmt.Errorf("image for service container: %w", err)
+ }
+
+ ctrOpts := entities.ContainerCreateOptions{
+ // Inherited from infra containers
+ ImageVolume: "bind",
+ IsInfra: false,
+ MemorySwappiness: -1,
+ // No need to spin up slirp etc.
+ Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}},
+ }
+
+ // Create and fill out the runtime spec.
+ s := specgen.NewSpecGenerator(image, false)
+ if err := specgenutil.FillOutSpecGen(s, &ctrOpts, []string{}); err != nil {
+ return nil, fmt.Errorf("completing spec for service container: %w", err)
+ }
+ s.Name = name
+
+ runtimeSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, s, false, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating runtime spec for service container: %w", err)
+ }
+ opts = append(opts, libpod.WithIsService())
+
+ // Create a new libpod container based on the spec.
+ ctr, err := ic.Libpod.NewContainer(ctx, runtimeSpec, spec, false, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("creating service container: %w", err)
+ }
+
+ return ctr, nil
+}
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (_ *entities.PlayKubeReport, finalErr error) {
report := &entities.PlayKubeReport{}
validKinds := 0
@@ -67,6 +109,30 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read kube YAML")
}
+ // TODO: create constants for the various "kinds" of yaml files.
+ var serviceContainer *libpod.Container
+ if options.ServiceContainer && (kind == "Pod" || kind == "Deployment") {
+ // The name of the service container is the first 12
+ // characters of the yaml file's hash followed by the
+ // '-service' suffix to guarantee a predictable and
+ // discoverable name.
+ hash := digest.FromBytes(content).Encoded()
+ ctr, err := ic.createServiceContainer(ctx, hash[0:12]+"-service")
+ if err != nil {
+ return nil, err
+ }
+ serviceContainer = ctr
+ // Make sure to remove the container in case something goes wrong below.
+ defer func() {
+ if finalErr == nil {
+ return
+ }
+ if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false, nil); err != nil {
+ logrus.Errorf("Cleaning up service container after failure: %v", err)
+ }
+ }()
+ }
+
switch kind {
case "Pod":
var podYAML v1.Pod
@@ -90,7 +156,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
podYAML.Annotations[name] = val
}
- r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps)
+ r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@@ -104,7 +170,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
}
- r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps)
+ r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@@ -148,7 +214,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return report, nil
}
-func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
deploymentName string
podSpec v1.PodTemplateSpec
@@ -170,7 +236,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
// create "replicas" number of pods
for i = 0; i < numReplicas; i++ {
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
- podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps)
+ podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
}
@@ -179,7 +245,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, nil
}
-func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
writer io.Writer
playKubePod entities.PlayKubePod
@@ -374,6 +440,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}
+ if serviceContainer != nil {
+ podSpec.PodSpecGen.ServiceContainerID = serviceContainer.ID()
+ }
+
// Create the Pod
pod, err := generate.MakePod(&podSpec, ic.Libpod)
if err != nil {
diff --git a/pkg/specgen/generate/pause_image.go b/pkg/specgen/generate/pause_image.go
new file mode 100644
index 000000000..4aba230a3
--- /dev/null
+++ b/pkg/specgen/generate/pause_image.go
@@ -0,0 +1,89 @@
+package generate
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ buildahDefine "github.com/containers/buildah/define"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/podman/v4/libpod"
+ "github.com/containers/podman/v4/libpod/define"
+)
+
+// PullOrBuildInfraImage pulls down the specified image or the one set in
+// containers.conf. If none is set, it builds a local pause image.
+func PullOrBuildInfraImage(rt *libpod.Runtime, imageName string) (string, error) {
+ rtConfig, err := rt.GetConfigNoCopy()
+ if err != nil {
+ return "", err
+ }
+
+ if imageName == "" {
+ imageName = rtConfig.Engine.InfraImage
+ }
+
+ if imageName != "" {
+ _, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
+ if err != nil {
+ return "", err
+ }
+ return imageName, nil
+ }
+
+ name, err := buildPauseImage(rt, rtConfig)
+ if err != nil {
+ return "", fmt.Errorf("building local pause image: %w", err)
+ }
+ return name, nil
+}
+
+func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
+ version, err := define.GetVersion()
+ if err != nil {
+ return "", err
+ }
+ imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
+
+ // First check if the image has already been built.
+ if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
+ return imageName, nil
+ }
+
+ // Also look into the path as some distributions install catatonit in
+ // /usr/bin.
+ catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
+ if err != nil {
+ return "", fmt.Errorf("finding pause binary: %w", err)
+ }
+
+ buildContent := fmt.Sprintf(`FROM scratch
+COPY %s /catatonit
+ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
+
+ tmpF, err := ioutil.TempFile("", "pause.containerfile")
+ if err != nil {
+ return "", err
+ }
+ if _, err := tmpF.WriteString(buildContent); err != nil {
+ return "", err
+ }
+ if err := tmpF.Close(); err != nil {
+ return "", err
+ }
+ defer os.Remove(tmpF.Name())
+
+ buildOptions := buildahDefine.BuildOptions{
+ CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
+ Output: imageName,
+ Quiet: true,
+ IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
+ IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
+ }
+ if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
+ return "", err
+ }
+
+ return imageName, nil
+}
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index fce32d688..5b7bb2b57 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -2,13 +2,8 @@ package generate
import (
"context"
- "fmt"
- "io/ioutil"
"net"
- "os"
- buildahDefine "github.com/containers/buildah/define"
- "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
@@ -17,98 +12,18 @@ import (
"github.com/sirupsen/logrus"
)
-func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
- version, err := define.GetVersion()
- if err != nil {
- return "", err
- }
- imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
-
- // First check if the image has already been built.
- if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
- return imageName, nil
- }
-
- // Also look into the path as some distributions install catatonit in
- // /usr/bin.
- catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
- if err != nil {
- return "", fmt.Errorf("finding pause binary: %w", err)
- }
-
- buildContent := fmt.Sprintf(`FROM scratch
-COPY %s /catatonit
-ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
-
- tmpF, err := ioutil.TempFile("", "pause.containerfile")
- if err != nil {
- return "", err
- }
- if _, err := tmpF.WriteString(buildContent); err != nil {
- return "", err
- }
- if err := tmpF.Close(); err != nil {
- return "", err
- }
- defer os.Remove(tmpF.Name())
-
- buildOptions := buildahDefine.BuildOptions{
- CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
- Output: imageName,
- Quiet: true,
- IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
- IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
- }
- if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
- return "", err
- }
-
- return imageName, nil
-}
-
-func pullOrBuildInfraImage(p *entities.PodSpec, rt *libpod.Runtime) error {
- if p.PodSpecGen.NoInfra {
- return nil
- }
-
- rtConfig, err := rt.GetConfigNoCopy()
- if err != nil {
- return err
- }
-
- // NOTE: we need pull down the infra image if it was explicitly set by
- // the user (or containers.conf) to the non-default one.
- imageName := p.PodSpecGen.InfraImage
- if imageName == "" {
- imageName = rtConfig.Engine.InfraImage
- }
-
- if imageName != "" {
- _, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
- if err != nil {
- return err
- }
- } else {
- name, err := buildPauseImage(rt, rtConfig)
- if err != nil {
- return fmt.Errorf("building local pause image: %w", err)
- }
- imageName = name
- }
-
- p.PodSpecGen.InfraImage = imageName
- p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
-
- return nil
-}
-
func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
if err := p.PodSpecGen.Validate(); err != nil {
return nil, err
}
- if err := pullOrBuildInfraImage(p, rt); err != nil {
- return nil, err
+ if !p.PodSpecGen.NoInfra {
+ imageName, err := PullOrBuildInfraImage(rt, p.PodSpecGen.InfraImage)
+ if err != nil {
+ return nil, err
+ }
+ p.PodSpecGen.InfraImage = imageName
+ p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
}
if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil {
@@ -180,6 +95,11 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithPodUser())
}
}
+
+ if len(p.ServiceContainerID) > 0 {
+ options = append(options, libpod.WithServiceContainer(p.ServiceContainerID))
+ }
+
if len(p.CgroupParent) > 0 {
options = append(options, libpod.WithPodCgroupParent(p.CgroupParent))
}
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 1bb64448f..603506241 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -204,6 +204,9 @@ type PodSpecGenerator struct {
PodStorageConfig
PodSecurityConfig
InfraContainerSpec *SpecGenerator `json:"-"`
+
+ // The ID of the pod's service container.
+ ServiceContainerID string `json:"serviceContainerID,omitempty"`
}
type PodResourceConfig struct {
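
A small standalone sketch (not part of this commit) that reproduces the naming scheme from the pkg/domain/infra/abi/play.go hunk above — the first 12 hex characters of the YAML digest plus the '-service' suffix — which can help locate the service container belonging to a given YAML file:

// Prints the service-container name that would be used for the given
// YAML file: the first 12 hex characters of the file's digest followed
// by a "-service" suffix, mirroring the logic in play.go above.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/opencontainers/go-digest"
)

func main() {
	content, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	hash := digest.FromBytes(content).Encoded()
	fmt.Println(hash[0:12] + "-service")
}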