author    baude <bbaude@redhat.com>  2018-11-13 10:53:18 -0600
committer baude <bbaude@redhat.com>  2018-11-19 09:05:24 -0600
commit    f11a74e7150c2929a0ed18732ee0c4895f7e75be (patch)
tree      779c097e2b02134d60986dc8950a76695202a8b7
parent    47ffaae840157dba421b018738416a3e4cc56a04 (diff)
output libpod container to kubernetes yaml
Scope out a new kube subcommand under which we can add generate. You can now generate Kubernetes YAML that will allow you to run the container in a Kubernetes environment. The YAML description will always "wrap" a container in a simple v1.Pod description. Tests and further documentation will be added in additional PRs. This function should be considered very much "under heavy development" at this point.

Signed-off-by: baude <bbaude@redhat.com>
-rw-r--r--  cmd/podman/kube.go           22
-rw-r--r--  cmd/podman/kube_generate.go  93
-rw-r--r--  cmd/podman/main.go            1
-rw-r--r--  libpod/kube.go              270
4 files changed, 386 insertions(+), 0 deletions(-)
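
For context, a rough usage sketch of the new subcommand (the container name "demo" is hypothetical; the YAML is abridged to the fields the code below actually sets):

$ podman kube generate demo
# Generation of Kubernetes YAML is still under development!
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: ...
  labels:
    app: demo
  name: demo-libpod
spec:
  containers:
  - name: demo
    ...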
diff --git a/cmd/podman/kube.go b/cmd/podman/kube.go
new file mode 100644
index 000000000..ced87e2bd
--- /dev/null
+++ b/cmd/podman/kube.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "github.com/urfave/cli"
+)
+
+var (
+ kubeSubCommands = []cli.Command{
+ containerKubeCommand,
+ }
+
+ kubeDescription = "Work with Kubernetes objects"
+ kubeCommand = cli.Command{
+ Name: "kube",
+ Usage: "Import and export Kubernetes objections from and to Podman",
+ Description: kubeDescription,
+ ArgsUsage: "",
+ Subcommands: kubeSubCommands,
+ UseShortOptionHandling: true,
+ OnUsageError: usageErrorHandler,
+ }
+)
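
As a side note, nested subcommand dispatch in urfave/cli works roughly as in this minimal, self-contained sketch (independent of podman's wiring; all names here are illustrative only):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:  "kube",
			Usage: "Work with Kubernetes objects",
			Subcommands: []cli.Command{
				{
					Name: "generate",
					Action: func(c *cli.Context) error {
						// "app kube generate foo" lands here with Args() == ["foo"]
						fmt.Println("would generate YAML for", c.Args().First())
						return nil
					},
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}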
diff --git a/cmd/podman/kube_generate.go b/cmd/podman/kube_generate.go
new file mode 100644
index 000000000..a18912668
--- /dev/null
+++ b/cmd/podman/kube_generate.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/urfave/cli"
+)
+
+var (
+ containerKubeFlags = []cli.Flag{
+ cli.BoolFlag{
+ Name: "service, s",
+ Usage: "only generate YAML for kubernetes service object",
+ },
+ LatestFlag,
+ }
+ containerKubeDescription = "Generate Kubernetes Pod YAML"
+ containerKubeCommand = cli.Command{
+ Name: "generate",
+ Usage: "Generate Kubernetes pod YAML for a container",
+ Description: containerKubeDescription,
+ Flags: sortFlags(containerKubeFlags),
+ Action: generateKubeYAMLCmd,
+ ArgsUsage: "CONTAINER-NAME",
+ UseShortOptionHandling: true,
+ OnUsageError: usageErrorHandler,
+ }
+)
+
+// generateKubeYAMLCmd generates kube YAML for a single container
+func generateKubeYAMLCmd(c *cli.Context) error {
+ var (
+ container *libpod.Container
+ err error
+ output []byte
+ )
+
+ if rootless.IsRootless() {
+ return errors.Wrapf(libpod.ErrNotImplemented, "rootless users")
+ }
+ args := c.Args()
+ if len(args) > 1 || (len(args) < 1 && !c.Bool("latest")) {
+ return errors.Errorf("you must provide one container ID or name or --latest")
+ }
+ if c.Bool("service") {
+ return errors.Wrapf(libpod.ErrNotImplemented, "service generation")
+ }
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ // Get the container in question
+ if c.Bool("latest") {
+ container, err = runtime.GetLatestContainer()
+ } else {
+ container, err = runtime.LookupContainer(args[0])
+ }
+ if err != nil {
+ return err
+ }
+
+ if len(container.Dependencies()) > 0 {
+ return errors.Wrapf(libpod.ErrNotImplemented, "containers with dependencies")
+ }
+
+ podYAML, err := container.InspectForKube()
+ if err != nil {
+ return err
+ }
+
+ developmentComment := []byte("# Generation of Kubernetes YAML is still under development!\n")
+ logrus.Warn("This function is still under heavy development.")
+ // Marshal the results
+ b, err := yaml.Marshal(podYAML)
+ if err != nil {
+ return err
+ }
+ output = append(output, developmentComment...)
+ output = append(output, b...)
+ // Output the v1.Pod with the v1.Container
+ fmt.Println(string(output))
+
+ return nil
+}
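
For reference, the marshal step at the end of this command can be exercised in isolation; a minimal sketch, assuming the k8s.io/api, k8s.io/apimachinery, and ghodss/yaml packages this patch depends on (the pod contents here are illustrative only):

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
	"k8s.io/api/core/v1"
	v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := v1.Pod{
		TypeMeta:   v12.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: v12.ObjectMeta{Name: "demo-libpod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "demo", Image: "alpine"}},
		},
	}
	// ghodss/yaml round-trips through encoding/json, so the v1 JSON
	// tags (apiVersion, kind, metadata, spec) shape the YAML keys.
	b, err := yaml.Marshal(&pod)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
}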
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 38eac4504..6be192593 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -77,6 +77,7 @@ func main() {
infoCommand,
inspectCommand,
killCommand,
+ kubeCommand,
loadCommand,
loginCommand,
logoutCommand,
diff --git a/libpod/kube.go b/libpod/kube.go
new file mode 100644
index 000000000..00db0033b
--- /dev/null
+++ b/libpod/kube.go
@@ -0,0 +1,270 @@
+package libpod
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// InspectForKube takes a libpod container and generates
+// one v1.Pod description that includes just a single container.
+func (c *Container) InspectForKube() (*v1.Pod, error) {
+ // Generate the v1.Pod yaml description
+ return simplePodWithV1Container(c)
+}
+
+// simplePodWithV1Container is a function used by inspect when kube yaml needs to be generated
+// for a single container. We "insert" that container's description in a pod.
+func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
+ var containers []v1.Container
+ result, err := containerToV1Container(ctr)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, result)
+
+ tm := v12.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ }
+
+ // Add a label called "app" with the containers name as a value
+ labels := make(map[string]string)
+ labels["app"] = removeUnderscores(ctr.Name())
+ om := v12.ObjectMeta{
+ // The name of the pod is container_name-libpod
+ Name: fmt.Sprintf("%s-libpod", removeUnderscores(ctr.Name())),
+ Labels: labels,
+ // CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
+ // will reflect the time this is run (not the container create time) because converting
+ // the container create time to a v1 Time is probably not warranted nor worthwhile.
+ CreationTimestamp: v12.Now(),
+ }
+ ps := v1.PodSpec{
+ Containers: containers,
+ }
+ p := v1.Pod{
+ TypeMeta: tm,
+ ObjectMeta: om,
+ Spec: ps,
+ }
+ return &p, nil
+}
+
+// containerToV1Container converts information we know about a libpod container
+// to a v1.Container specification.
+func containerToV1Container(c *Container) (v1.Container, error) {
+ kubeContainer := v1.Container{}
+ kubeSec, err := generateKubeSecurityContext(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+
+ if len(c.config.Spec.Linux.Devices) > 0 {
+ // TODO Enable when we can support devices and their names
+ devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeDevices = devices
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "linux devices")
+ }
+
+ if len(c.config.UserVolumes) > 0 {
+ // TODO Until we can resolve what the volume name should be, this is disabled.
+ // Volume names need to be coordinated "globally" in the kube files.
+ volumes, err := libpodMountsToKubeVolumeMounts(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeMounts = volumes
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "volume names")
+ }
+
+ envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
+ if err != nil {
+ return kubeContainer, err
+ }
+
+ ports, err := ocicniPortMappingToContainerPort(c.PortMappings())
+ if err != nil {
+ return kubeContainer, err
+ }
+
+ containerCommands := c.Command()
+ kubeContainer.Name = removeUnderscores(c.Name())
+
+ _, image := c.Image()
+ kubeContainer.Image = image
+ kubeContainer.Stdin = c.Stdin()
+ kubeContainer.Command = containerCommands
+ // TODO need to figure out how we handle command vs entry point. Kube appears to prefer entrypoint.
+ // right now we just take the container's command
+ //container.Args = args
+ kubeContainer.WorkingDir = c.WorkingDir()
+ kubeContainer.Ports = ports
+ // This should not be applicable
+ //container.EnvFromSource =
+ kubeContainer.Env = envVariables
+ // TODO enable resources when we can support naming conventions
+ //container.Resources
+ kubeContainer.SecurityContext = kubeSec
+ kubeContainer.StdinOnce = false
+ kubeContainer.TTY = c.config.Spec.Process.Terminal
+
+ return kubeContainer, nil
+}
+
+// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
+// it to a v1.ContainerPort format for kube output
+func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.ContainerPort, error) {
+ var containerPorts []v1.ContainerPort
+ for _, p := range portMappings {
+ var protocol v1.Protocol
+ switch strings.ToUpper(p.Protocol) {
+ case "TCP":
+ protocol = v1.ProtocolTCP
+ case "UDP":
+ protocol = v1.ProtocolUDP
+ default:
+ return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
+ }
+ cp := v1.ContainerPort{
+ // Name will not be supported
+ HostPort: p.HostPort,
+ HostIP: p.HostIP,
+ ContainerPort: p.ContainerPort,
+ Protocol: protocol,
+ }
+ containerPorts = append(containerPorts, cp)
+ }
+ return containerPorts, nil
+}
+
+// libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
+func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
+ var envVars []v1.EnvVar
+ for _, e := range envs {
+ splitE := strings.SplitN(e, "=", 2)
+ if len(splitE) != 2 {
+ return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
+ }
+ ev := v1.EnvVar{
+ Name: splitE[0],
+ Value: splitE[1],
+ }
+ envVars = append(envVars, ev)
+ }
+ return envVars, nil
+}
+
+// Is this worth it?
+func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
+ // It does not appear we can properly calculate CPU resources from the information
+ // we know in libpod. Libpod knows CPUs by time, shares, etc.
+
+ // We also only know about a memory limit; no memory minimum
+ maxResources := make(map[v1.ResourceName]resource.Quantity)
+ minResources := make(map[v1.ResourceName]resource.Quantity)
+ config := c.Config()
+ maxMem := config.Spec.Linux.Resources.Memory.Limit
+
+ _ = maxMem
+
+ return maxResources, minResources
+}
+
+func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
+ vm := v1.VolumeMount{}
+ for _, m := range mounts {
+ if m.Source == hostSourcePath {
+ // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
+ //vm.Name =
+ // MountPath is the path inside the container, i.e. the mount destination
+ vm.MountPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+ return vm, nil
+ }
+ }
+ return vm, errors.New("unable to find mount source")
+}
+
+// libpodMountsToKubeVolumeMounts converts the container's mounts to a struct kube understands
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
+ // At this point, I don't think we can distinguish between the default
+ // volume mounts and user-added ones. For now, we pass them all.
+ var vms []v1.VolumeMount
+ for _, hostSourcePath := range c.config.UserVolumes {
+ vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
+ if err != nil {
+ return vms, err
+ }
+ vms = append(vms, vm)
+ }
+ return vms, nil
+}
+
+// generateKubeSecurityContext generates a securityContext based on the existing container
+func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
+ priv := c.Privileged()
+ ro := c.IsReadOnly()
+ allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
+
+ // TODO enable use of capabilities when we can figure out how to extract cap-add|remove
+ //caps := v1.Capabilities{
+ // //Add: c.config.Spec.Process.Capabilities
+ //}
+ sc := v1.SecurityContext{
+ // TODO enable use of capabilities when we can figure out how to extract cap-add|remove
+ //Capabilities: &caps,
+ Privileged: &priv,
+ // TODO How do we know if SELinux options were passed into podman?
+ //SELinuxOptions:
+ // RunAsNonRoot is an optional parameter; our first implementations should be root only; however
+ // I'm leaving this as a bread-crumb for later
+ //RunAsNonRoot: &nonRoot,
+ ReadOnlyRootFilesystem: &ro,
+ AllowPrivilegeEscalation: &allowPrivEscalation,
+ }
+
+ if c.User() != "" {
+ // It is *possible* that the user is only known inside the container's /etc/passwd
+ logrus.Debugf("Looking in container for user: %s", c.User())
+ u, err := lookup.GetUser(c.state.Mountpoint, c.User())
+ if err != nil {
+ return nil, err
+ }
+ user := int64(u.Uid)
+ sc.RunAsUser = &user
+ }
+ return &sc, nil
+}
+
+// generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube
+func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.VolumeDevice, error) {
+ var volumeDevices []v1.VolumeDevice
+ for _, d := range devices {
+ vd := v1.VolumeDevice{
+ // TBD How are we going to sync up these names
+ //Name:
+ DevicePath: d.Path,
+ }
+ volumeDevices = append(volumeDevices, vd)
+ }
+ return volumeDevices, nil
+}
+
+func removeUnderscores(s string) string {
+ return strings.Replace(s, "_", "", -1)
+}
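
Finally, the key=value split used by libpodEnvVarsToKubeEnvVars above is worth a standalone illustration, since SplitN with a limit of 2 keeps '=' characters inside values intact; a minimal sketch (not part of the patch; sample values are illustrative):

package main

import (
	"fmt"
	"strings"

	"k8s.io/api/core/v1"
)

// toEnvVars mirrors the splitting rule above: split on the first '='
// only, so values such as "di=34:ln=36" survive unmangled.
func toEnvVars(envs []string) ([]v1.EnvVar, error) {
	var out []v1.EnvVar
	for _, e := range envs {
		kv := strings.SplitN(e, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("environment variable %s is malformed; should be key=value", e)
		}
		out = append(out, v1.EnvVar{Name: kv[0], Value: kv[1]})
	}
	return out, nil
}

func main() {
	vars, err := toEnvVars([]string{"PATH=/usr/bin:/bin", "LS_COLORS=di=34:ln=36"})
	if err != nil {
		panic(err)
	}
	for _, v := range vars {
		fmt.Printf("%s -> %q\n", v.Name, v.Value)
	}
}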