Diffstat (limited to 'pkg/adapter')
-rw-r--r--  pkg/adapter/client.go             |  10
-rw-r--r--  pkg/adapter/containers.go         |  88
-rw-r--r--  pkg/adapter/containers_remote.go  |  34
-rw-r--r--  pkg/adapter/info_remote.go        |  14
-rw-r--r--  pkg/adapter/pods.go               | 302
-rw-r--r--  pkg/adapter/pods_remote.go        |  27
-rw-r--r--  pkg/adapter/runtime.go            |  14
-rw-r--r--  pkg/adapter/runtime_remote.go     |  12
8 files changed, 447 insertions(+), 54 deletions(-)
diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go
index 6feae5400..694d9f961 100644
--- a/pkg/adapter/client.go
+++ b/pkg/adapter/client.go
@@ -15,8 +15,10 @@ import (
var remoteEndpoint *Endpoint
func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) {
- remoteConfigConnections, _ := remoteclientconfig.ReadRemoteConfig(r.config)
-
+ remoteConfigConnections, err := remoteclientconfig.ReadRemoteConfig(r.config)
+ if err != nil && errors.Cause(err) != remoteclientconfig.ErrNoConfigationFile {
+ return nil, err
+ }
// If the user defines an env variable for podman_varlink_bridge
// we use that as passed.
if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
@@ -47,6 +49,10 @@ func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) {
if err != nil {
return nil, err
}
+ if len(rc.Username) < 1 {
+ logrus.Debugf("Connection has no username, using current user %q", r.cmd.RemoteUserName)
+ rc.Username = r.cmd.RemoteUserName
+ }
remoteEndpoint, err = newBridgeConnection("", rc, r.cmd.LogLevel)
// last resort is to make a socket connection with the default varlink address for root user
} else {
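
The guard above is the subtle part of endpoint resolution: a missing remote config file must be tolerated (the env bridge or the default socket can still apply), while any other read error is fatal. A minimal sketch of that precedence, with a local sentinel standing in for remoteclientconfig.ErrNoConfigationFile and a plain string standing in for the parsed connection, so nothing here is the adapter's real API:

    package main

    import (
        "fmt"
        "os"

        "github.com/pkg/errors"
    )

    // errNoConfigFile stands in for remoteclientconfig.ErrNoConfigationFile;
    // a missing config file is tolerated, every other error is fatal.
    var errNoConfigFile = errors.New("no remote client configuration file")

    func readConfig(path string) (string, error) {
        data, err := os.ReadFile(path)
        if os.IsNotExist(err) {
            return "", errors.Wrap(errNoConfigFile, path)
        }
        if err != nil {
            return "", err
        }
        return string(data), nil
    }

    func resolveEndpoint(configPath string) (string, error) {
        cfg, err := readConfig(configPath)
        // Tolerate only the "no config file" case; surface anything else.
        if err != nil && errors.Cause(err) != errNoConfigFile {
            return "", err
        }
        // An explicit bridge command always wins.
        if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
            return bridge, nil
        }
        if cfg != "" {
            return cfg, nil
        }
        // Last resort: the default varlink socket.
        return "unix:/run/podman/io.podman", nil
    }

    func main() {
        ep, err := resolveEndpoint("/etc/containers/podman-remote.conf")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("endpoint:", ep)
    }
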
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 38c04cceb..10720886b 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -20,9 +20,11 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/cmd/podman/shared/parse"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
"github.com/containers/libpod/pkg/systemdgen"
"github.com/containers/psgo"
@@ -242,7 +244,7 @@ func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig
logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error())
continue
}
- if state == libpod.ContainerStateRunning {
+ if state == define.ContainerStateRunning {
logrus.Debugf("Error umounting container %s, is running", ctr.ID())
continue
}
@@ -283,13 +285,14 @@ func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.Wait
}
// Log logs one or more containers
-func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
+func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
var wg sync.WaitGroup
options.WaitGroup = &wg
if len(c.InputArgs) > 1 {
options.Multi = true
}
- logChannel := make(chan *libpod.LogLine, int(c.Tail)*len(c.InputArgs)+1)
+ logChannel := make(chan *logs.LogLine, int(c.Tail)*len(c.InputArgs)+1)
containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
if err != nil {
return err
@@ -494,7 +497,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if err != nil {
return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
}
- if conState != libpod.ContainerStateRunning {
+ if conState != define.ContainerStateRunning {
return errors.Errorf("you can only attach to running containers")
}
@@ -545,16 +548,23 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
}
// Restore one or more containers
-func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
var (
containers []*libpod.Container
err, lastError error
filterFuncs []libpod.ContainerFilter
)
+ options := libpod.ContainerCheckpointOptions{
+ Keep: c.Keep,
+ TCPEstablished: c.TcpEstablished,
+ TargetFile: c.Import,
+ Name: c.Name,
+ }
+
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
- return state == libpod.ContainerStateExited
+ return state == define.ContainerStateExited
})
if c.Import != "" {
@@ -612,7 +622,7 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
return exitCode, errors.Wrapf(err, "unable to get container state")
}
- ctrRunning := ctrState == libpod.ContainerStateRunning
+ ctrRunning := ctrState == define.ContainerStateRunning
if c.Attach {
inputStream := os.Stdin
@@ -738,7 +748,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
var filterFuncs []libpod.ContainerFilter
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
- return state == libpod.ContainerStatePaused
+ return state == define.ContainerStatePaused
})
ctrs, err = r.GetContainers(filterFuncs...)
} else {
@@ -935,7 +945,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
if c.PodID() != "" {
return false
}
- if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
+ if state == define.ContainerStateStopped || state == define.ContainerStateExited {
return true
}
return false
@@ -1026,7 +1036,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
//Convert libpod containers to adapter Containers
for _, con := range containers {
- if state, _ := con.State(); state != libpod.ContainerStateRunning {
+ if state, _ := con.State(); state != define.ContainerStateRunning {
continue
}
portContainers = append(portContainers, &Container{con})
@@ -1107,3 +1117,61 @@ func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, co
}
return newImage.ID(), nil
}
+
+// Exec a command in a container
+func (r *LocalRuntime) Exec(c *cliconfig.ExecValues, cmd []string) error {
+ var ctr *Container
+ var err error
+
+ if c.Latest {
+ ctr, err = r.GetLatestContainer()
+ } else {
+ ctr, err = r.LookupContainer(c.InputArgs[0])
+ }
+ if err != nil {
+ return errors.Wrapf(err, "unable to exec into %s", c.InputArgs[0])
+ }
+
+ if c.PreserveFDs > 0 {
+ entries, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return errors.Wrapf(err, "unable to read /proc/self/fd")
+ }
+ m := make(map[int]bool)
+ for _, e := range entries {
+ i, err := strconv.Atoi(e.Name())
+ if err != nil {
+ return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ }
+ m[i] = true
+ }
+ for i := 3; i < 3+c.PreserveFDs; i++ {
+ if _, found := m[i]; !found {
+ return errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
+ }
+ }
+ }
+
+ // ENVIRONMENT VARIABLES
+ env := map[string]string{}
+
+ if err := parse.ReadKVStrings(env, []string{}, c.Env); err != nil {
+ return errors.Wrapf(err, "unable to process environment variables")
+ }
+ envs := []string{}
+ for k, v := range env {
+ envs = append(envs, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ streams := new(libpod.AttachStreams)
+ streams.OutputStream = os.Stdout
+ streams.ErrorStream = os.Stderr
+ streams.InputStream = os.Stdin
+ streams.AttachOutput = true
+ streams.AttachError = true
+ streams.AttachInput = true
+
+ return ctr.Exec(c.Tty, c.Privileged, envs, cmd, c.User, c.Workdir, streams, c.PreserveFDs)
+}
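
The --preserve-fds validation above scans /proc/self/fd so a bad N fails fast instead of handing the exec session closed descriptors. A self-contained sketch of the same scan (Linux-only, since it relies on procfs):

    package main

    import (
        "fmt"
        "os"
        "strconv"

        "github.com/pkg/errors"
    )

    // checkPreservedFDs verifies that file descriptors 3 through 3+n-1 are
    // open in the current process by listing /proc/self/fd.
    func checkPreservedFDs(n int) error {
        entries, err := os.ReadDir("/proc/self/fd")
        if err != nil {
            return errors.Wrap(err, "unable to read /proc/self/fd")
        }
        open := make(map[int]bool, len(entries))
        for _, e := range entries {
            fd, err := strconv.Atoi(e.Name())
            if err != nil {
                return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
            }
            open[fd] = true
        }
        // FDs 0-2 are stdio; preserved descriptors start at 3.
        for fd := 3; fd < 3+n; fd++ {
            if !open[fd] {
                return errors.Errorf("fd %d requested via --preserve-fds but not open", fd)
            }
        }
        return nil
    }

    func main() {
        if err := checkPreservedFDs(2); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("all preserved fds are open")
    }
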
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index c52dc1d7a..5836d0788 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -17,6 +17,7 @@ import (
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/varlinkapi/virtwriter"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/term"
@@ -411,8 +412,8 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
return bcs, nil
}
-// Logs one or more containers over a varlink connection
-func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
+// Log one or more containers over a varlink connection
+func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
// GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
@@ -434,7 +435,7 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions)
if err != nil {
return errors.Wrapf(err, "unable to parse time of log %s", log.Time)
}
- logLine := libpod.LogLine{
+ logLine := logs.LogLine{
Device: log.Device,
ParseLogType: log.ParseLogType,
Time: lTime,
@@ -516,7 +517,7 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
RootFsSize: ctr.RootFsSize,
RwSize: ctr.RwSize,
}
- state, err := libpod.StringToContainerStatus(ctr.State)
+ state, err := define.StringToContainerStatus(ctr.State)
if err != nil {
return nil, err
}
@@ -645,7 +646,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if err != nil {
return nil
}
- if ctr.state.State != libpod.ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
return errors.New("you can only attach to running containers")
}
inputStream := os.Stdin
@@ -682,7 +683,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
if err != nil {
return err
}
- if ctr.state.State == libpod.ContainerStateRunning {
+ if ctr.state.State == define.ContainerStateRunning {
runningIds = append(runningIds, id)
}
}
@@ -703,7 +704,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
}
// Restore one or more containers
-func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
if c.Import != "" {
return errors.New("the remote client does not support importing checkpoints")
}
@@ -722,7 +723,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues,
if err != nil {
return err
}
- if ctr.state.State != libpod.ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
exitedIDs = append(exitedIDs, id)
}
}
@@ -730,7 +731,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues,
}
for _, id := range ids {
- if _, err := iopodman.ContainerRestore().Call(r.Conn, id, options.Keep, options.TCPEstablished); err != nil {
+ if _, err := iopodman.ContainerRestore().Call(r.Conn, id, c.Keep, c.TcpEstablished); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
@@ -797,7 +798,7 @@ func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.Pause
)
if cli.All {
- filters := []string{libpod.ContainerStateRunning.String()}
+ filters := []string{define.ContainerStateRunning.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@@ -834,7 +835,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
if cli.All {
- filters := []string{libpod.ContainerStatePaused.String()}
+ filters := []string{define.ContainerStatePaused.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@@ -873,7 +874,7 @@ func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues)
}
restartContainers = append(restartContainers, lastCtr)
} else if c.Running {
- containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+ containers, err = r.LookupContainersWithStatus([]string{define.ContainerStateRunning.String()})
if err != nil {
return nil, nil, err
}
@@ -941,7 +942,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
)
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
- filters := []string{libpod.ContainerStateExited.String()}
+ filters := []string{define.ContainerStateExited.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
if err != nil {
return ok, failures, err
@@ -974,7 +975,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
containers, err = r.GetContainersByContext(false, c.Latest, c.InputArgs)
} else {
// we need to only use running containers if all
- filters := []string{libpod.ContainerStateRunning.String()}
+ filters := []string{define.ContainerStateRunning.String()}
containers, err = r.LookupContainersWithStatus(filters)
}
if err != nil {
@@ -1025,3 +1026,8 @@ func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, co
}
return iid, nil
}
+
+// Exec executes a command in a running container
+func (r *LocalRuntime) Exec(c *cliconfig.ExecValues, cmd []string) error {
+ return define.ErrNotImplemented
+}
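
The remote stubs return define.ErrNotImplemented so callers can tell "unsupported over varlink" apart from a real failure. A small sketch of that pattern with a local sentinel error; the helper names are illustrative:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // errNotImplemented stands in for define.ErrNotImplemented.
    var errNotImplemented = errors.New("not implemented")

    // remoteExec models a stub the remote adapter cannot service yet.
    func remoteExec() error {
        return errors.Wrap(errNotImplemented, "exec over varlink")
    }

    func main() {
        if err := remoteExec(); errors.Cause(err) == errNotImplemented {
            fmt.Println("operation not supported by the remote client")
        } else if err != nil {
            fmt.Println("exec failed:", err)
        }
    }
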
diff --git a/pkg/adapter/info_remote.go b/pkg/adapter/info_remote.go
index 3b2d02a5a..3170e5b3d 100644
--- a/pkg/adapter/info_remote.go
+++ b/pkg/adapter/info_remote.go
@@ -4,16 +4,16 @@ package adapter
import (
"encoding/json"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
)
// Info returns information for the host system and its components
-func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
+func (r RemoteRuntime) Info() ([]define.InfoData, error) {
// TODO the varlink implementation for info should be updated to match the output for regular info
var (
- reply []libpod.InfoData
+ reply []define.InfoData
hostInfo map[string]interface{}
store map[string]interface{}
)
@@ -43,9 +43,9 @@ func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
insecureRegistries["registries"] = info.Insecure_registries
// Add everything to the reply
- reply = append(reply, libpod.InfoData{Type: "host", Data: hostInfo})
- reply = append(reply, libpod.InfoData{Type: "registries", Data: registries})
- reply = append(reply, libpod.InfoData{Type: "insecure registries", Data: insecureRegistries})
- reply = append(reply, libpod.InfoData{Type: "store", Data: store})
+ reply = append(reply, define.InfoData{Type: "host", Data: hostInfo})
+ reply = append(reply, define.InfoData{Type: "registries", Data: registries})
+ reply = append(reply, define.InfoData{Type: "insecure registries", Data: insecureRegistries})
+ reply = append(reply, define.InfoData{Type: "store", Data: store})
return reply, nil
}
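
Info now hands back []define.InfoData: named sections, each carrying a free-form map. A sketch of a consumer that pretty-prints those sections, with the struct mirrored locally so the example stands alone:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    // infoData mirrors the shape of define.InfoData as used above:
    // a section name plus arbitrary key/value details.
    type infoData struct {
        Type string
        Data map[string]interface{}
    }

    func main() {
        sections := []infoData{
            {Type: "host", Data: map[string]interface{}{"os": "linux", "cpus": 8}},
            {Type: "registries", Data: map[string]interface{}{"search": []string{"docker.io"}}},
        }
        for _, s := range sections {
            out, err := json.MarshalIndent(s.Data, "", "  ")
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            fmt.Printf("%s:\n%s\n", s.Type, out)
        }
    }
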
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index bb7d9cce6..a28e1ab4b 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -4,14 +4,33 @@ package adapter
import (
"context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
"strings"
+ "github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ ns "github.com/containers/libpod/pkg/namespaces"
+ createconfig "github.com/containers/libpod/pkg/spec"
+ "github.com/containers/storage"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ createDirectoryPermission = 0755
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ createFilePermission = 0644
)
// PodContainerStats is struct containing an adapter Pod and a libpod
@@ -420,3 +439,286 @@ func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error)
}
return adapterPods, nil
}
+
+// PlayKubeYAML creates pods and containers from a kube YAML file
+func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ podYAML v1.Pod
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ )
+
+ content, err := ioutil.ReadFile(yamlFile)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
+ }
+
+ // check for name collision between pod and container
+ podName := podYAML.ObjectMeta.Name
+ for _, n := range podYAML.Spec.Containers {
+ if n.Name == podName {
+ fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName)
+ podName = fmt.Sprintf("%s_pod", podName)
+ }
+ }
+
+ podOptions = append(podOptions, libpod.WithInfraContainer())
+ podOptions = append(podOptions, libpod.WithPodName(podName))
+ // TODO: for now we use the default kernel namespaces; we need to add/subtract these from the YAML
+
+ nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
+ if err != nil {
+ return nil, err
+ }
+ podOptions = append(podOptions, nsOptions...)
+ podPorts := getPodPorts(podYAML.Spec.Containers)
+ podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
+
+ // Create the Pod
+ pod, err = r.NewPod(ctx, podOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ podInfraID, err := pod.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+
+ namespaces := map[string]string{
+ // Disabled during code review per mheon
+ //"pid": fmt.Sprintf("container:%s", podInfraID),
+ "net": fmt.Sprintf("container:%s", podInfraID),
+ "user": fmt.Sprintf("container:%s", podInfraID),
+ "ipc": fmt.Sprintf("container:%s", podInfraID),
+ "uts": fmt.Sprintf("container:%s", podInfraID),
+ }
+ if !c.Quiet {
+ writer = os.Stderr
+ }
+
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: c.CertDir,
+ }
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
+ }
+
+ // map from name to mount point
+ volumes := make(map[string]string)
+ for _, volume := range podYAML.Spec.Volumes {
+ hostPath := volume.VolumeSource.HostPath
+ if hostPath == nil {
+ return nil, errors.Errorf("HostPath is currently the only supported VolumeSource")
+ }
+ if hostPath.Type != nil {
+ switch *hostPath.Type {
+ case v1.HostPathDirectoryOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ }
+ // unconditionally label a newly created volume as private
+ if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathFileOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
+ if err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ if err := f.Close(); err != nil {
+ logrus.Warnf("Error in closing newly created HostPath file: %v", err)
+ }
+ }
+ // unconditionally label a newly created volume as private
+ if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathDirectory, v1.HostPathFile, v1.HostPathUnset:
+ // do nothing here because we will verify the path exists in validateVolumeHostDir
+ default:
+ return nil, errors.Errorf("Directories are the only supported HostPath type")
+ }
+ }
+
+ if err := createconfig.ValidateVolumeHostDir(hostPath.Path); err != nil {
+ return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML")
+ }
+ volumes[volume.Name] = hostPath.Path
+ }
+
+ for _, container := range podYAML.Spec.Containers {
+ newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
+ if err != nil {
+ return nil, err
+ }
+ createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID())
+ if err != nil {
+ return nil, err
+ }
+ ctr, err := shared.CreateContainerFromCreateConfig(r.Runtime, createConfig, ctx, pod)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, ctr)
+ }
+
+ // start the containers
+ for _, ctr := range containers {
+ if err := ctr.Start(ctx, true); err != nil {
+ // Make this a hard failure; otherwise we would be left with
+ // the other containers stuck in Created status
+ return nil, err
+ }
+ }
+
+ // We've now successfully converted this YAML into a pod
+ // print our pod and containers, signifying we succeeded
+ fmt.Printf("Pod:\n%s\n", pod.ID())
+ if len(containers) == 1 {
+ fmt.Printf("Container:\n")
+ }
+ if len(containers) > 1 {
+ fmt.Printf("Containers:\n")
+ }
+ for _, ctr := range containers {
+ fmt.Println(ctr.ID())
+ }
+
+ if err := playcleanup(ctx, r, pod, nil); err != nil {
+ logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
+ }
+ return nil, nil
+}
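
PlayKubeYAML only consumes a subset of a Kubernetes pod manifest: metadata.name, spec.containers, and hostPath volumes. A sketch of just the decode step, using the same ghodss/yaml and k8s.io/api/core/v1 packages the function imports; the manifest content is made up for illustration:

    package main

    import (
        "fmt"
        "os"

        "github.com/ghodss/yaml"
        v1 "k8s.io/api/core/v1"
    )

    const manifest = `
    apiVersion: v1
    kind: Pod
    metadata:
      name: demo
    spec:
      containers:
      - name: web
        image: docker.io/library/nginx:latest
        ports:
        - containerPort: 80
          hostPort: 8080
    `

    func main() {
        var pod v1.Pod
        // ghodss/yaml converts YAML to JSON first, so the standard
        // Kubernetes JSON struct tags on v1.Pod apply.
        if err := yaml.Unmarshal([]byte(manifest), &pod); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("pod name:", pod.ObjectMeta.Name)
        for _, c := range pod.Spec.Containers {
            fmt.Printf("container %s -> image %s, ports %v\n", c.Name, c.Image, c.Ports)
        }
    }
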
+
+func playcleanup(ctx context.Context, runtime *LocalRuntime, pod *libpod.Pod, err error) error {
+ if err != nil && pod != nil {
+ return runtime.RemovePod(ctx, pod, true, true)
+ }
+ return nil
+}
+
+// getPodPorts converts a slice of kube container descriptions to an
+// array of ocicni portmapping descriptions usable in libpod
+func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
+ var infraPorts []ocicni.PortMapping
+ for _, container := range containers {
+ for _, p := range container.Ports {
+ portBinding := ocicni.PortMapping{
+ HostPort: p.HostPort,
+ ContainerPort: p.ContainerPort,
+ Protocol: strings.ToLower(string(p.Protocol)),
+ }
+ if p.HostIP != "" {
+ logrus.Debug("HostIP on port bindings is not supported")
+ }
+ infraPorts = append(infraPorts, portBinding)
+ }
+ }
+ return infraPorts
+}
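
A quick table-style check for getPodPorts, written as an in-package test sketch (it assumes the local, non-remoteclient build of package adapter shown above):

    package adapter

    import (
        "testing"

        v1 "k8s.io/api/core/v1"
    )

    // TestGetPodPortsSketch feeds one container port through getPodPorts
    // and checks the resulting infra-container mapping.
    func TestGetPodPortsSketch(t *testing.T) {
        ctrs := []v1.Container{{
            Name:  "web",
            Ports: []v1.ContainerPort{{ContainerPort: 80, HostPort: 8080, Protocol: v1.ProtocolTCP}},
        }}
        got := getPodPorts(ctrs)
        if len(got) != 1 || got[0].HostPort != 8080 || got[0].Protocol != "tcp" {
            t.Fatalf("unexpected mappings: %+v", got)
        }
    }
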
+
+// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
+ var (
+ containerConfig createconfig.CreateConfig
+ )
+
+ // The default for MemorySwappiness is -1, not 0
+ containerConfig.Resources.MemorySwappiness = -1
+
+ containerConfig.Image = containerYAML.Image
+ containerConfig.ImageID = newImage.ID()
+ containerConfig.Name = containerYAML.Name
+ containerConfig.Tty = containerYAML.TTY
+ containerConfig.WorkDir = containerYAML.WorkingDir
+
+ containerConfig.Pod = podID
+
+ imageData, _ := newImage.Inspect(ctx)
+
+ containerConfig.User = "0"
+ if imageData != nil {
+ containerConfig.User = imageData.Config.User
+ }
+
+ if containerYAML.SecurityContext != nil {
+ if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
+ containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+ }
+ if containerYAML.SecurityContext.Privileged != nil {
+ containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
+ }
+
+ if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
+ containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+ }
+ }
+
+ containerConfig.Command = []string{}
+ if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
+ }
+ if len(containerConfig.Command) != 0 {
+ containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
+ } else if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
+ }
+ if imageData != nil && len(containerConfig.Command) == 0 {
+ return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
+ }
+
+ containerConfig.StopSignal = 15
+
+ // If the user does not pass in ID mappings, just set to basics
+ if containerConfig.IDMappings == nil {
+ containerConfig.IDMappings = &storage.IDMappingOptions{}
+ }
+
+ containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
+ containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+ containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+ // disabled in code review per mheon
+ //containerConfig.PidMode = ns.PidMode(namespaces["pid"])
+ containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+ if len(containerConfig.WorkDir) == 0 {
+ containerConfig.WorkDir = "/"
+ }
+
+ // Set default environment variables and incorporate data from image, if necessary
+ envs := shared.EnvVariablesFromData(imageData)
+
+ // Environment Variables
+ for _, e := range containerYAML.Env {
+ envs[e.Name] = e.Value
+ }
+ containerConfig.Env = envs
+
+ for _, volume := range containerYAML.VolumeMounts {
+ hostPath, exists := volumes[volume.Name]
+ if !exists {
+ return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
+ }
+ if err := createconfig.ValidateVolumeCtrDir(volume.MountPath); err != nil {
+ return nil, errors.Wrapf(err, "error in parsing MountPath")
+ }
+ containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath))
+ }
+ return &containerConfig, nil
+}
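
The command assembly in kubeContainerToCreateConfig is easy to misread: the image ENTRYPOINT seeds the command; the YAML command is appended only when an entrypoint exists; otherwise the image CMD is used on its own and the YAML command is dropped. A pure-function sketch of that precedence; the function name is illustrative, not the adapter's API:

    package main

    import "fmt"

    // assembleCommand mirrors the precedence above: start from the image
    // ENTRYPOINT; if that yields anything, append the YAML command;
    // otherwise fall back to the image CMD alone.
    func assembleCommand(entrypoint, imageCmd, yamlCmd []string) []string {
        cmd := []string{}
        cmd = append(cmd, entrypoint...)
        if len(cmd) != 0 {
            cmd = append(cmd, yamlCmd...)
        } else {
            cmd = append(cmd, imageCmd...)
        }
        return cmd
    }

    func main() {
        // ENTRYPOINT present: the YAML command is appended as arguments.
        fmt.Println(assembleCommand([]string{"nginx"}, []string{"-g", "daemon off;"}, []string{"-T"}))
        // No ENTRYPOINT: the image CMD wins; note the YAML command is not
        // used on this path, which matters when writing manifests.
        fmt.Println(assembleCommand(nil, []string{"/bin/sh"}, []string{"echo", "hi"}))
    }
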
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index 125d057b0..0c62ac923 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -258,25 +258,25 @@ func (p *Pod) AllContainers() ([]*Container, error) {
}
// Status ...
-func (p *Pod) Status() (map[string]libpod.ContainerStatus, error) {
- ctrs := make(map[string]libpod.ContainerStatus)
+func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
+ ctrs := make(map[string]define.ContainerStatus)
for _, i := range p.containers {
- var status libpod.ContainerStatus
+ var status define.ContainerStatus
switch i.State {
case "exited":
- status = libpod.ContainerStateExited
+ status = define.ContainerStateExited
case "stopped":
- status = libpod.ContainerStateStopped
+ status = define.ContainerStateStopped
case "running":
- status = libpod.ContainerStateRunning
+ status = define.ContainerStateRunning
case "paused":
- status = libpod.ContainerStatePaused
+ status = define.ContainerStatePaused
case "created":
- status = libpod.ContainerStateCreated
- case "configured":
- status = libpod.ContainerStateConfigured
+ status = define.ContainerStateCreated
+ case "define.red":
+ status = define.ContainerStateConfigured
default:
- status = libpod.ContainerStateUnknown
+ status = define.ContainerStateUnknown
}
ctrs[i.ID] = status
}
@@ -564,3 +564,8 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
}
return ok, failures, nil
}
+
+// PlayKubeYAML creates pods and containers from a kube YAML file
+func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
+ return nil, define.ErrNotImplemented
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 37ee1b737..dd77b3a3e 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -5,6 +5,7 @@ package adapter
import (
"bufio"
"context"
+ "github.com/containers/libpod/libpod/define"
"io"
"io/ioutil"
"os"
@@ -313,8 +314,13 @@ func IsImageNotFound(err error) bool {
}
// HealthCheck is a wrapper to same named function in libpod
-func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
- return r.Runtime.HealthCheck(c.InputArgs[0])
+func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
+ output := "unhealthy"
+ status, err := r.Runtime.HealthCheck(c.InputArgs[0])
+ if status == libpod.HealthCheckSuccess {
+ output = "healthy"
+ }
+ return output, err
}
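
The local HealthCheck wrapper now folds libpod's status enum down to the two strings the CLI prints. A sketch of the same fold with a local enum standing in for libpod.HealthCheckStatus; the constant values here are assumptions, not libpod's:

    package main

    import "fmt"

    // healthStatus stands in for libpod.HealthCheckStatus.
    type healthStatus int

    const (
        healthCheckSuccess healthStatus = iota
        healthCheckFailure
    )

    // describe collapses the enum to the strings surfaced by the CLI;
    // anything that is not an explicit success reads as unhealthy.
    func describe(s healthStatus) string {
        if s == healthCheckSuccess {
            return "healthy"
        }
        return "unhealthy"
    }

    func main() {
        fmt.Println(describe(healthCheckSuccess)) // healthy
        fmt.Println(describe(healthCheckFailure)) // unhealthy
    }
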
// Events is a wrapper to libpod to obtain libpod/podman events
@@ -395,8 +401,8 @@ func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error)
}
// GetVersion is an alias to satisfy interface{}
-func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
- return libpod.GetVersion()
+func (r *LocalRuntime) GetVersion() (define.Version, error) {
+ return define.GetVersion()
}
// RemoteEndpoint resolve interface requirement
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 97e28e901..3be89233d 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -771,8 +771,8 @@ func IsImageNotFound(err error) bool {
}
// HealthCheck executes a container's healthcheck over a varlink connection
-func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
- return -1, define.ErrNotImplemented
+func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
+ return "", define.ErrNotImplemented
}
// Events monitors libpod/podman events over a varlink connection
@@ -907,22 +907,22 @@ func (r *LocalRuntime) GetContainersByContext(all bool, latest bool, namesOrIDs
}
// GetVersion returns version information from service
-func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
+func (r *LocalRuntime) GetVersion() (define.Version, error) {
version, goVersion, gitCommit, built, osArch, apiVersion, err := iopodman.GetVersion().Call(r.Conn)
if err != nil {
- return libpod.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
+ return define.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
}
var buildTime int64
if built != "" {
t, err := time.Parse(time.RFC3339, built)
if err != nil {
- return libpod.Version{}, nil
+ return define.Version{}, errors.Wrapf(err, "unable to parse build time %q", built)
}
buildTime = t.Unix()
}
- return libpod.Version{
+ return define.Version{
RemoteAPIVersion: apiVersion,
Version: version,
GoVersion: goVersion,