Diffstat (limited to 'pkg/domain/infra')
-rw-r--r--  pkg/domain/infra/abi/generate.go       85
-rw-r--r--  pkg/domain/infra/abi/play.go          544
-rw-r--r--  pkg/domain/infra/abi/trust.go         171
-rw-r--r--  pkg/domain/infra/tunnel/generate.go     5
-rw-r--r--  pkg/domain/infra/tunnel/play.go        12
-rw-r--r--  pkg/domain/infra/tunnel/trust.go       16
6 files changed, 833 insertions, 0 deletions
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index f69ba560e..be5d452bd 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -1,14 +1,18 @@
package abi
import (
+ "bytes"
"context"
"fmt"
"strings"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/systemd/generate"
+ "github.com/ghodss/yaml"
"github.com/pkg/errors"
+ k8sAPI "k8s.io/api/core/v1"
)
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
@@ -172,3 +176,84 @@ func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entitie
}
return ctrName, fmt.Sprintf("%s-%s", kind, name)
}
+
+func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
+ var (
+ pod *libpod.Pod
+ podYAML *k8sAPI.Pod
+ err error
+ ctr *libpod.Container
+ servicePorts []k8sAPI.ServicePort
+ serviceYAML k8sAPI.Service
+ )
+ // Get the container in question.
+ ctr, err = ic.Libpod.LookupContainer(nameOrID)
+ if err != nil {
+ pod, err = ic.Libpod.LookupPod(nameOrID)
+ if err != nil {
+ return nil, err
+ }
+ podYAML, servicePorts, err = pod.GenerateForKube()
+ } else {
+ if len(ctr.Dependencies()) > 0 {
+ return nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies")
+ }
+ podYAML, err = ctr.GenerateForKube()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if options.Service {
+ serviceYAML = libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts)
+ }
+
+ content, err := generateKubeOutput(podYAML, &serviceYAML)
+ if err != nil {
+ return nil, err
+ }
+
+ return &entities.GenerateKubeReport{Reader: bytes.NewReader(content)}, nil
+}
+
+func generateKubeOutput(podYAML *k8sAPI.Pod, serviceYAML *k8sAPI.Service) ([]byte, error) {
+ var (
+ output []byte
+ marshalledPod []byte
+ marshalledService []byte
+ err error
+ )
+
+ marshalledPod, err = yaml.Marshal(podYAML)
+ if err != nil {
+ return nil, err
+ }
+
+ if serviceYAML != nil {
+ marshalledService, err = yaml.Marshal(serviceYAML)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ header := `# Generation of Kubernetes YAML is still under development!
+#
+# Save the output of this file and use kubectl create -f to import
+# it into Kubernetes.
+#
+# Created with podman-%s
+`
+ podmanVersion, err := define.GetVersion()
+ if err != nil {
+ return nil, err
+ }
+
+ output = append(output, []byte(fmt.Sprintf(header, podmanVersion.Version))...)
+ output = append(output, marshalledPod...)
+ if serviceYAML != nil {
+ output = append(output, []byte("---\n")...)
+ output = append(output, marshalledService...)
+ }
+
+ return output, nil
+}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
new file mode 100644
index 000000000..cd7eec7e6
--- /dev/null
+++ b/pkg/domain/infra/abi/play.go
@@ -0,0 +1,544 @@
+package abi
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ ann "github.com/containers/libpod/pkg/annotations"
+ "github.com/containers/libpod/pkg/domain/entities"
+ envLib "github.com/containers/libpod/pkg/env"
+ ns "github.com/containers/libpod/pkg/namespaces"
+ createconfig "github.com/containers/libpod/pkg/spec"
+ "github.com/containers/libpod/pkg/specgen/generate"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/storage"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/distribution/reference"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ kubeDirectoryPermission = 0755
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ kubeFilePermission = 0644
+)
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ podYAML v1.Pod
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ report entities.PlayKubeReport
+ )
+
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read %q as YAML", path)
+ }
+
+ if podYAML.Kind != "Pod" {
+ return nil, errors.Errorf("invalid YAML kind: %q. Pod is the only supported Kubernetes YAML kind", podYAML.Kind)
+ }
+
+ // check for name collision between pod and container
+ podName := podYAML.ObjectMeta.Name
+ if podName == "" {
+ return nil, errors.Errorf("pod does not have a name")
+ }
+ for _, n := range podYAML.Spec.Containers {
+ if n.Name == podName {
+ report.Logs = append(report.Logs,
+ fmt.Sprintf("a container exists with the same name (%q) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName))
+ podName = fmt.Sprintf("%s_pod", podName)
+ }
+ }
+
+ podOptions = append(podOptions, libpod.WithInfraContainer())
+ podOptions = append(podOptions, libpod.WithPodName(podName))
+ // TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml
+
+ hostname := podYAML.Spec.Hostname
+ if hostname == "" {
+ hostname = podName
+ }
+ podOptions = append(podOptions, libpod.WithPodHostname(hostname))
+
+ if podYAML.Spec.HostNetwork {
+ podOptions = append(podOptions, libpod.WithPodHostNetwork())
+ }
+
+ nsOptions, err := generate.GetNamespaceOptions(strings.Split(createconfig.DefaultKernelNamespaces, ","))
+ if err != nil {
+ return nil, err
+ }
+ podOptions = append(podOptions, nsOptions...)
+ podPorts := getPodPorts(podYAML.Spec.Containers)
+ podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
+
+ if options.Network != "" {
+ switch strings.ToLower(options.Network) {
+ case "bridge", "host":
+ return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML")
+ case "":
+ return nil, errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks")
+ default:
+ // We'll assume this is a comma-separated list of CNI
+ // networks.
+ networks := strings.Split(options.Network, ",")
+ logrus.Debugf("Pod joining CNI networks: %v", networks)
+ podOptions = append(podOptions, libpod.WithPodNetworks(networks))
+ }
+ }
+
+ // Create the Pod
+ pod, err = ic.Libpod.NewPod(ctx, podOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ podInfraID, err := pod.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ hasUserns := false
+ if podInfraID != "" {
+ podCtr, err := ic.Libpod.GetContainer(podInfraID)
+ if err != nil {
+ return nil, err
+ }
+ mappings, err := podCtr.IDMappings()
+ if err != nil {
+ return nil, err
+ }
+ hasUserns = len(mappings.UIDMap) > 0
+ }
+
+ namespaces := map[string]string{
+ // Disabled during code review per mheon
+ //"pid": fmt.Sprintf("container:%s", podInfraID),
+ "net": fmt.Sprintf("container:%s", podInfraID),
+ "ipc": fmt.Sprintf("container:%s", podInfraID),
+ "uts": fmt.Sprintf("container:%s", podInfraID),
+ }
+ if hasUserns {
+ namespaces["user"] = fmt.Sprintf("container:%s", podInfraID)
+ }
+ if !options.Quiet {
+ writer = os.Stderr
+ }
+
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: options.CertDir,
+ DockerInsecureSkipTLSVerify: options.SkipTLSVerify,
+ }
+
+ // map from name to mount point
+ volumes := make(map[string]string)
+ for _, volume := range podYAML.Spec.Volumes {
+ hostPath := volume.VolumeSource.HostPath
+ if hostPath == nil {
+ return nil, errors.Errorf("HostPath is currently the only supported VolumeSource")
+ }
+ if hostPath.Type != nil {
+ switch *hostPath.Type {
+ case v1.HostPathDirectoryOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ if err := os.Mkdir(hostPath.Path, kubeDirectoryPermission); err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ }
+ // Label a newly created volume
+ if err := libpod.LabelVolumePath(hostPath.Path); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathFileOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, kubeFilePermission)
+ if err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ if err := f.Close(); err != nil {
+ logrus.Warnf("Error in closing newly created HostPath file: %v", err)
+ }
+ }
+ // unconditionally label a newly created volume
+ if err := libpod.LabelVolumePath(hostPath.Path); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathDirectory:
+ case v1.HostPathFile:
+ case v1.HostPathUnset:
+ // do nothing here because we will verify the path exists in validateVolumeHostDir
+ break
+ default:
+ return nil, errors.Errorf("Directories are the only supported HostPath type")
+ }
+ }
+
+ if err := parse.ValidateVolumeHostDir(hostPath.Path); err != nil {
+ return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML")
+ }
+ volumes[volume.Name] = hostPath.Path
+ }
+
+ seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations, options.SeccompProfileRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, container := range podYAML.Spec.Containers {
+ pullPolicy := util.PullImageMissing
+ if len(container.ImagePullPolicy) > 0 {
+ pullPolicy, err = util.ValidatePullType(string(container.ImagePullPolicy))
+ if err != nil {
+ return nil, err
+ }
+ }
+ named, err := reference.ParseNormalizedNamed(container.Image)
+ if err != nil {
+ return nil, err
+ }
+ // In kube, if the image is tagged with latest, it should always pull
+ if tagged, isTagged := named.(reference.NamedTagged); isTagged {
+ if tagged.Tag() == image.LatestTag {
+ pullPolicy = util.PullImageAlways
+ }
+ }
+ newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy)
+ if err != nil {
+ return nil, err
+ }
+ conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths)
+ if err != nil {
+ return nil, err
+ }
+ ctr, err := createconfig.CreateContainerFromCreateConfig(ic.Libpod, conf, ctx, pod)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, ctr)
+ }
+
+ // start the containers
+ for _, ctr := range containers {
+ if err := ctr.Start(ctx, true); err != nil {
+ // Making this a hard failure here to avoid a mess
+ // the other containers are in created status
+ return nil, err
+ }
+ }
+
+ report.Pod = pod.ID()
+ for _, ctr := range containers {
+ report.Containers = append(report.Containers, ctr.ID())
+ }
+
+ return &report, nil
+}
+
+// getPodPorts converts a slice of kube container descriptions to an
+// array of ocicni portmapping descriptions usable in libpod
+func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
+ var infraPorts []ocicni.PortMapping
+ for _, container := range containers {
+ for _, p := range container.Ports {
+ if p.HostPort != 0 && p.ContainerPort == 0 {
+ p.ContainerPort = p.HostPort
+ }
+ if p.Protocol == "" {
+ p.Protocol = "tcp"
+ }
+ portBinding := ocicni.PortMapping{
+ HostPort: p.HostPort,
+ ContainerPort: p.ContainerPort,
+ Protocol: strings.ToLower(string(p.Protocol)),
+ }
+ if p.HostIP != "" {
+ logrus.Debug("HostIP on port bindings is not supported")
+ }
+ // only hostPort is utilized in podman context, all container ports
+ // are accessible inside the shared network namespace
+ if p.HostPort != 0 {
+ infraPorts = append(infraPorts, portBinding)
+ }
+
+ }
+ }
+ return infraPorts
+}
+
+func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) {
+ if containerYAML.SecurityContext == nil {
+ return
+ }
+ if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
+ securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+ }
+ if containerYAML.SecurityContext.Privileged != nil {
+ securityConfig.Privileged = *containerYAML.SecurityContext.Privileged
+ }
+
+ if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
+ securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+ }
+
+ if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
+ if seopt.User != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
+ }
+ if seopt.Role != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
+ }
+ if seopt.Type != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
+ }
+ if seopt.Level != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
+ }
+ }
+ if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
+ for _, capability := range caps.Add {
+ securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability))
+ }
+ for _, capability := range caps.Drop {
+ securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability))
+ }
+ }
+ if containerYAML.SecurityContext.RunAsUser != nil {
+ userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
+ }
+ if containerYAML.SecurityContext.RunAsGroup != nil {
+ if userConfig.User == "" {
+ userConfig.User = "0"
+ }
+ userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup)
+ }
+}
+
+// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+ var (
+ containerConfig createconfig.CreateConfig
+ pidConfig createconfig.PidConfig
+ networkConfig createconfig.NetworkConfig
+ cgroupConfig createconfig.CgroupConfig
+ utsConfig createconfig.UtsConfig
+ ipcConfig createconfig.IpcConfig
+ userConfig createconfig.UserConfig
+ securityConfig createconfig.SecurityConfig
+ )
+
+ // The default for MemorySwappiness is -1, not 0
+ containerConfig.Resources.MemorySwappiness = -1
+
+ containerConfig.Image = containerYAML.Image
+ containerConfig.ImageID = newImage.ID()
+ containerConfig.Name = containerYAML.Name
+ containerConfig.Tty = containerYAML.TTY
+
+ containerConfig.Pod = podID
+
+ imageData, _ := newImage.Inspect(ctx)
+
+ userConfig.User = "0"
+ if imageData != nil {
+ userConfig.User = imageData.Config.User
+ }
+
+ setupSecurityContext(&securityConfig, &userConfig, containerYAML)
+
+ securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name)
+
+ containerConfig.Command = []string{}
+ if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
+ }
+ if len(containerYAML.Command) != 0 {
+ containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
+ } else if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
+ }
+ if imageData != nil && len(containerConfig.Command) == 0 {
+ return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
+ }
+
+ containerConfig.UserCommand = containerConfig.Command
+
+ containerConfig.StopSignal = 15
+
+ containerConfig.WorkDir = "/"
+ if imageData != nil {
+ // FIXME,
+ // we are currently ignoring imageData.Config.ExposedPorts
+ containerConfig.BuiltinImgVolumes = imageData.Config.Volumes
+ if imageData.Config.WorkingDir != "" {
+ containerConfig.WorkDir = imageData.Config.WorkingDir
+ }
+ containerConfig.Labels = imageData.Config.Labels
+ if imageData.Config.StopSignal != "" {
+ stopSignal, err := util.ParseSignal(imageData.Config.StopSignal)
+ if err != nil {
+ return nil, err
+ }
+ containerConfig.StopSignal = stopSignal
+ }
+ }
+
+ if containerYAML.WorkingDir != "" {
+ containerConfig.WorkDir = containerYAML.WorkingDir
+ }
+ // If the user does not pass in ID mappings, just set to basics
+ if userConfig.IDMappings == nil {
+ userConfig.IDMappings = &storage.IDMappingOptions{}
+ }
+
+ networkConfig.NetMode = ns.NetworkMode(namespaces["net"])
+ ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+ utsConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+ // disabled in code review per mheon
+ //containerConfig.PidMode = ns.PidMode(namespaces["pid"])
+ userConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+ if len(containerConfig.WorkDir) == 0 {
+ containerConfig.WorkDir = "/"
+ }
+
+ containerConfig.Pid = pidConfig
+ containerConfig.Network = networkConfig
+ containerConfig.Uts = utsConfig
+ containerConfig.Ipc = ipcConfig
+ containerConfig.Cgroup = cgroupConfig
+ containerConfig.User = userConfig
+ containerConfig.Security = securityConfig
+
+ annotations := make(map[string]string)
+ if infraID != "" {
+ annotations[ann.SandboxID] = infraID
+ annotations[ann.ContainerType] = ann.ContainerTypeContainer
+ }
+ containerConfig.Annotations = annotations
+
+ // Environment Variables
+ envs := map[string]string{}
+ if imageData != nil {
+ imageEnv, err := envLib.ParseSlice(imageData.Config.Env)
+ if err != nil {
+ return nil, errors.Wrap(err, "error parsing image environment variables")
+ }
+ envs = imageEnv
+ }
+ for _, e := range containerYAML.Env {
+ envs[e.Name] = e.Value
+ }
+ containerConfig.Env = envs
+
+ for _, volume := range containerYAML.VolumeMounts {
+ hostPath, exists := volumes[volume.Name]
+ if !exists {
+ return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
+ }
+ if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil {
+ return nil, errors.Wrapf(err, "error in parsing MountPath")
+ }
+ containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath))
+ }
+ return &containerConfig, nil
+}
+
+// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
+// it holds both container and pod seccomp paths
+type kubeSeccompPaths struct {
+ containerPaths map[string]string
+ podPath string
+}
+
+// findForContainer checks whether a container has a seccomp path configured for it
+// if not, it returns the podPath, which should always have a value
+func (k *kubeSeccompPaths) findForContainer(ctrName string) string {
+ if path, ok := k.containerPaths[ctrName]; ok {
+ return path
+ }
+ return k.podPath
+}
+
+// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp
+// it parses both pod and container level
+// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s
+func initializeSeccompPaths(annotations map[string]string, profileRoot string) (*kubeSeccompPaths, error) {
+ seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)}
+ var err error
+ if annotations != nil {
+ for annKeyValue, seccomp := range annotations {
+ // check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/
+ prefixAndCtr := strings.Split(annKeyValue, "/")
+ if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix {
+ continue
+ } else if len(prefixAndCtr) != 2 {
+ // this could be caused by a user inputting either of
+ // container.seccomp.security.alpha.kubernetes.io{,/}
+ // both of which are invalid
+ return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0])
+ }
+
+ path, err := verifySeccompPath(seccomp, profileRoot)
+ if err != nil {
+ return nil, err
+ }
+ seccompPaths.containerPaths[prefixAndCtr[1]] = path
+ }
+
+ podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey]
+ if ok {
+ seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot)
+ } else {
+ seccompPaths.podPath, err = libpod.DefaultSeccompPath()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return seccompPaths, nil
+}
+
+// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path
+// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+func verifySeccompPath(path string, profileRoot string) (string, error) {
+ switch path {
+ case v1.DeprecatedSeccompProfileDockerDefault:
+ fallthrough
+ case v1.SeccompProfileRuntimeDefault:
+ return libpod.DefaultSeccompPath()
+ case "unconfined":
+ return path, nil
+ default:
+ parts := strings.Split(path, "/")
+ if parts[0] == "localhost" {
+ return filepath.Join(profileRoot, parts[1]), nil
+ }
+ return "", errors.Errorf("invalid seccomp path: %s", path)
+ }
+}
diff --git a/pkg/domain/infra/abi/trust.go b/pkg/domain/infra/abi/trust.go
new file mode 100644
index 000000000..5b89c91d9
--- /dev/null
+++ b/pkg/domain/infra/abi/trust.go
@@ -0,0 +1,171 @@
+package abi
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/trust"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) {
+ var (
+ err error
+ report entities.ShowTrustReport
+ )
+ policyPath := trust.DefaultPolicyPath(ir.Libpod.SystemContext())
+ if len(options.PolicyPath) > 0 {
+ policyPath = options.PolicyPath
+ }
+ report.Raw, err = ioutil.ReadFile(policyPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if options.Raw {
+ return &report, nil
+ }
+ report.SystemRegistriesDirPath = trust.RegistriesDirPath(ir.Libpod.SystemContext())
+ if len(options.RegistryPath) > 0 {
+ report.SystemRegistriesDirPath = options.RegistryPath
+ }
+ policyContentStruct, err := trust.GetPolicy(policyPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not read trust policies")
+ }
+ report.Policies, err = getPolicyShowOutput(policyContentStruct, report.SystemRegistriesDirPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not show trust policies")
+ }
+ return &report, nil
+}
+
+func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options entities.SetTrustOptions) error {
+ var (
+ policyContentStruct trust.PolicyContent
+ newReposContent []trust.RepoContent
+ )
+ trustType := options.Type
+ if trustType == "accept" {
+ trustType = "insecureAcceptAnything"
+ }
+
+ pubkeysfile := options.PubKeysFile
+ if len(pubkeysfile) == 0 && trustType == "signedBy" {
+ return errors.Errorf("At least one public key must be defined for type 'signedBy'")
+ }
+
+ policyPath := trust.DefaultPolicyPath(ir.Libpod.SystemContext())
+ if len(options.PolicyPath) > 0 {
+ policyPath = options.PolicyPath
+ }
+ _, err := os.Stat(policyPath)
+ if !os.IsNotExist(err) {
+ policyContent, err := ioutil.ReadFile(policyPath)
+ if err != nil {
+ return errors.Wrapf(err, "unable to read %s", policyPath)
+ }
+ if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
+ return errors.Errorf("could not read trust policies")
+ }
+ }
+ if len(pubkeysfile) != 0 {
+ for _, filepath := range pubkeysfile {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trustType, KeyType: "GPGKeys", KeyPath: filepath})
+ }
+ } else {
+ newReposContent = append(newReposContent, trust.RepoContent{Type: trustType})
+ }
+ if args[0] == "default" {
+ policyContentStruct.Default = newReposContent
+ } else {
+ if len(policyContentStruct.Default) == 0 {
+ return errors.Errorf("Default trust policy must be set.")
+ }
+ registryExists := false
+ for transport, transportval := range policyContentStruct.Transports {
+ _, registryExists = transportval[args[0]]
+ if registryExists {
+ policyContentStruct.Transports[transport][args[0]] = newReposContent
+ break
+ }
+ }
+ if !registryExists {
+ if policyContentStruct.Transports == nil {
+ policyContentStruct.Transports = make(map[string]trust.RepoMap)
+ }
+ if policyContentStruct.Transports["docker"] == nil {
+ policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent)
+ }
+ policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...)
+ }
+ }
+
+ data, err := json.MarshalIndent(policyContentStruct, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error setting trust policy")
+ }
+ return ioutil.WriteFile(policyPath, data, 0644)
+}
+
+func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]*trust.TrustPolicy, error) {
+ var output []*trust.TrustPolicy
+
+ registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(policyContentStruct.Default) > 0 {
+ defaultPolicyStruct := trust.TrustPolicy{
+ Name: "* (default)",
+ RepoName: "default",
+ Type: trustTypeDescription(policyContentStruct.Default[0].Type),
+ }
+ output = append(output, &defaultPolicyStruct)
+ }
+ for _, transval := range policyContentStruct.Transports {
+ for repo, repoval := range transval {
+ tempTrustShowOutput := trust.TrustPolicy{
+ Name: repo,
+ RepoName: repo,
+ Type: repoval[0].Type,
+ }
+ // TODO - keyarr is not used and I don't know its intent; commenting out for now for someone to fix later
+ //keyarr := []string{}
+ uids := []string{}
+ for _, repoele := range repoval {
+ if len(repoele.KeyPath) > 0 {
+ //keyarr = append(keyarr, repoele.KeyPath)
+ uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
+ }
+ if len(repoele.KeyData) > 0 {
+ //keyarr = append(keyarr, string(repoele.KeyData))
+ uids = append(uids, trust.GetGPGIdFromKeyData(repoele.KeyData)...)
+ }
+ }
+ tempTrustShowOutput.GPGId = strings.Join(uids, ", ")
+
+ registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs)
+ if registryNamespace != nil {
+ tempTrustShowOutput.SignatureStore = registryNamespace.SigStore
+ }
+ output = append(output, &tempTrustShowOutput)
+ }
+ }
+ return output, nil
+}
+
+var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"}
+
+func trustTypeDescription(trustType string) string {
+ trustDescription, exist := typeDescription[trustType]
+ if !exist {
+ logrus.Warnf("invalid trust type %s", trustType)
+ }
+ return trustDescription
+}
diff --git a/pkg/domain/infra/tunnel/generate.go b/pkg/domain/infra/tunnel/generate.go
index 3cd483053..eb5587f89 100644
--- a/pkg/domain/infra/tunnel/generate.go
+++ b/pkg/domain/infra/tunnel/generate.go
@@ -3,6 +3,7 @@ package tunnel
import (
"context"
+ "github.com/containers/libpod/pkg/bindings/generate"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/pkg/errors"
)
@@ -10,3 +11,7 @@ import (
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
return nil, errors.New("not implemented for tunnel")
}
+
+func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
+ return generate.GenerateKube(ic.ClientCxt, nameOrID, options)
+}
diff --git a/pkg/domain/infra/tunnel/play.go b/pkg/domain/infra/tunnel/play.go
new file mode 100644
index 000000000..15383a703
--- /dev/null
+++ b/pkg/domain/infra/tunnel/play.go
@@ -0,0 +1,12 @@
+package tunnel
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/bindings/play"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+ return play.PlayKube(ic.ClientCxt, path, options)
+}
diff --git a/pkg/domain/infra/tunnel/trust.go b/pkg/domain/infra/tunnel/trust.go
new file mode 100644
index 000000000..a976bfdc2
--- /dev/null
+++ b/pkg/domain/infra/tunnel/trust.go
@@ -0,0 +1,16 @@
+package tunnel
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) {
+ return nil, errors.New("not implemented")
+}
+
+func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options entities.SetTrustOptions) error {
+ return errors.New("not implemented")
+}