Diffstat (limited to 'pkg/adapter/containers_remote.go')
-rw-r--r--  pkg/adapter/containers_remote.go  349
1 file changed, 333 insertions(+), 16 deletions(-)
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 3730827c7..50cff9fa0 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -6,18 +6,25 @@ import (
"context"
"encoding/json"
"fmt"
+ "io"
+ "os"
"strconv"
"syscall"
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/cmd/podman/varlink"
+ iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/libpod/pkg/varlinkapi/virtwriter"
+ "github.com/docker/docker/pkg/term"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/varlink/go/varlink"
+ "golang.org/x/crypto/ssh/terminal"
+ "k8s.io/client-go/tools/remotecommand"
)
// Inspect returns an inspect struct from varlink
@@ -70,6 +77,19 @@ func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, erro
}
+// Spec obtains the container spec.
+func (r *LocalRuntime) Spec(name string) (*specs.Spec, error) {
+ reply, err := iopodman.Spec().Call(r.Conn, name)
+ if err != nil {
+ return nil, err
+ }
+ data := specs.Spec{}
+ if err := json.Unmarshal([]byte(reply), &data); err != nil {
+ return nil, err
+ }
+ return &data, nil
+}
+
// LookupContainer gets basic information about a container over a varlink
// connection and then translates it to a *Container
func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
@@ -78,10 +98,6 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
return nil, err
}
config := r.Config(idOrName)
- if err != nil {
- return nil, err
- }
-
return &Container{
remoteContainer{
r,
@@ -128,7 +144,7 @@ func (c *Container) Name() string {
return c.config.Name
}
-// StopContainers stops requested containers using CLI inputs.
+// StopContainers stops requested containers using varlink.
// Returns the list of stopped container ids, map of failed to stop container ids + errors, or any non-container error
func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) {
var (
@@ -152,7 +168,7 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
return ok, failures, nil
}
-// KillContainers sends signal to container(s) based on CLI inputs.
+// KillContainers sends a signal to container(s) over a varlink connection.
// Returns list of successful id(s), map of failed id(s) + error, or error not from container
func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
var (
@@ -176,6 +192,52 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
return ok, failures, nil
}
+// RemoveContainers removes container(s) based on varlink inputs.
+func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ for _, id := range ids {
+ _, err := iopodman.RemoveContainer().Call(r.Conn, id, cli.Force, cli.Volumes)
+ if err != nil {
+ failures[id] = err
+ } else {
+ ok = append(ok, id)
+ }
+ }
+ return ok, failures, nil
+}
+
+// UmountRootFilesystems unmounts the root filesystems of container(s) based on varlink inputs
+func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ ok = []string{}
+ failures = map[string]error{}
+ )
+
+ for _, id := range ids {
+ err := iopodman.UnmountContainer().Call(r.Conn, id, cli.Force)
+ if err != nil {
+ failures[id] = err
+ } else {
+ ok = append(ok, id)
+ }
+ }
+ return ok, failures, nil
+}
+
// WaitOnContainers waits for all given container(s) to stop.
// interval is currently ignored.
func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) {
@@ -227,7 +289,7 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
// Logs one or more containers over a varlink connection
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
- //GetContainersLogs
+ // GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
return errors.Wrapf(err, "failed to get container logs")
@@ -269,26 +331,281 @@ func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateV
// TODO need to add attach when that function becomes available
return "", errors.New("the remote client only supports detached containers")
}
- results := shared.NewIntermediateLayer(&c.PodmanCommand)
+ results := shared.NewIntermediateLayer(&c.PodmanCommand, true)
return iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink())
}
// Run creates a container over varlink and then starts it
func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) {
+ // FIXME
+ // podman-remote run -it alpine ls DOES NOT WORK YET
+ // podman-remote run -it alpine /bin/sh does; I suspect there is some sort of
+ // timing issue between socket availability, terminal setup, and the command
+ // being run.
+
// TODO the exit codes for run need to be figured out for remote connections
- if !c.Bool("detach") {
- return 0, errors.New("the remote client only supports detached containers")
- }
- results := shared.NewIntermediateLayer(&c.PodmanCommand)
+ results := shared.NewIntermediateLayer(&c.PodmanCommand, true)
cid, err := iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink())
if err != nil {
return 0, err
}
- fmt.Println(cid)
- _, err = iopodman.StartContainer().Call(r.Conn, cid)
- return 0, err
+ if c.Bool("detach") {
+ _, err := iopodman.StartContainer().Call(r.Conn, cid)
+ fmt.Println(cid)
+ return 0, err
+ }
+
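+ // Not detached: attach to the container's stdio over the varlink connection
+ // and block until the attach stream returns its final error.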
+ errChan, err := r.attach(ctx, os.Stdin, os.Stdout, cid, true)
+ if err != nil {
+ return 0, err
+ }
+ finalError := <-errChan
+ return 0, finalError
}
func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
return 0, libpod.ErrNotImplemented
}
+
+// Ps lists containers over a varlink connection and converts the results into
+// the shared Ps output format.
+func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) {
+ var psContainers []shared.PsContainerOutput
+ last := int64(c.Last)
+ PsOpts := iopodman.PsOpts{
+ All: c.All,
+ Filters: &c.Filter,
+ Last: &last,
+ Latest: &c.Latest,
+ NoTrunc: &c.NoTrunct,
+ Pod: &c.Pod,
+ Quiet: &c.Quiet,
+ Sort: &c.Sort,
+ Sync: &c.Sync,
+ }
+ containers, err := iopodman.Ps().Call(r.Conn, PsOpts)
+ if err != nil {
+ return nil, err
+ }
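+ // Convert each varlink container record into the shared Ps output format
+ // used by the local CLI.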
+ for _, ctr := range containers {
+ createdAt, err := time.Parse(time.RFC3339Nano, ctr.CreatedAt)
+ if err != nil {
+ return nil, err
+ }
+ exitedAt, err := time.Parse(time.RFC3339Nano, ctr.ExitedAt)
+ if err != nil {
+ return nil, err
+ }
+ startedAt, err := time.Parse(time.RFC3339Nano, ctr.StartedAt)
+ if err != nil {
+ return nil, err
+ }
+ containerSize := shared.ContainerSize{
+ RootFsSize: ctr.RootFsSize,
+ RwSize: ctr.RwSize,
+ }
+ state, err := libpod.StringToContainerStatus(ctr.State)
+ if err != nil {
+ return nil, err
+ }
+ psc := shared.PsContainerOutput{
+ ID: ctr.Id,
+ Image: ctr.Image,
+ Command: ctr.Command,
+ Created: ctr.Created,
+ Ports: ctr.Ports,
+ Names: ctr.Names,
+ IsInfra: ctr.IsInfra,
+ Status: ctr.Status,
+ State: state,
+ Pid: int(ctr.PidNum),
+ Size: &containerSize,
+ Pod: ctr.Pod,
+ CreatedAt: createdAt,
+ ExitedAt: exitedAt,
+ StartedAt: startedAt,
+ Labels: ctr.Labels,
+ PID: ctr.NsPid,
+ Cgroup: ctr.Cgroup,
+ IPC: ctr.Ipc,
+ MNT: ctr.Mnt,
+ NET: ctr.Net,
+ PIDNS: ctr.PidNs,
+ User: ctr.User,
+ UTS: ctr.Uts,
+ Mounts: ctr.Mounts,
+ }
+ psContainers = append(psContainers, psc)
+ }
+ return psContainers, nil
+}
+
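+// attach upgrades the varlink connection for the given container, forwards
+// stdin and terminal resize events to the server, copies container output to
+// stdout/stderr, and returns a channel that delivers the final error once the
+// streams close.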
+func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool) (chan error, error) {
+ var (
+ oldTermState *term.State
+ )
+ errChan := make(chan error)
+ spec, err := r.Spec(cid)
+ if err != nil {
+ return nil, err
+ }
+ resize := make(chan remotecommand.TerminalSize)
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && spec.Process.Terminal {
+ logrus.Debugf("Handling terminal attach")
+
+ subCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ resizeTty(subCtx, resize)
+ oldTermState, err = term.SaveState(os.Stdin.Fd())
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to save terminal state")
+ }
+
+ logrus.SetFormatter(&RawTtyFormatter{})
+ term.SetRawTerminal(os.Stdin.Fd())
+
+ }
+ // TODO add detach keys support
+ _, err = iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, "", start)
+ if err != nil {
+ restoreTerminal(oldTermState)
+ return nil, err
+ }
+
+ // These are the reader and writer for the upgraded varlink connection
+ reader := r.Conn.Reader
+ writer := r.Conn.Writer
+
+ // These are the special writers that encode input from the client.
+ varlinkStdinWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.ToStdin)
+ varlinkResizeWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.TerminalResize)
+
+ go func() {
+ // Read from the wire and direct to stdout or stderr
+ err := virtwriter.Reader(reader, stdout, os.Stderr, nil, nil)
+ defer restoreTerminal(oldTermState)
+ errChan <- err
+ }()
+
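+ // Forward local terminal resize events to the server as JSON on the
+ // dedicated resize stream.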
+ go func() {
+ for termResize := range resize {
+ b, err := json.Marshal(termResize)
+ if err != nil {
+ defer restoreTerminal(oldTermState)
+ errChan <- err
+ }
+ _, err = varlinkResizeWriter.Write(b)
+ if err != nil {
+ defer restoreTerminal(oldTermState)
+ errChan <- err
+ }
+ }
+ }()
+
+ // Takes standard input, encodes it, and sends it over the wire
+ go func() {
+ if _, err := io.Copy(varlinkStdinWriter, stdin); err != nil {
+ defer restoreTerminal(oldTermState)
+ errChan <- err
+ }
+
+ }()
+ return errChan, nil
+
+}
+
+// Attach attaches to a remote container's terminal
+func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error {
+ ctr, err := r.LookupContainer(c.InputArgs[0])
+ if err != nil {
+ return err
+ }
+ if ctr.state.State != libpod.ContainerStateRunning {
+ return errors.New("you can only attach to running containers")
+ }
+ inputStream := os.Stdin
+ if c.NoStdin {
+ inputStream = nil
+ }
+ errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false)
+ if err != nil {
+ return err
+ }
+ return <-errChan
+}
+
+// Checkpoint one or more containers
+func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.ContainerCheckpointOptions) error {
+ var lastError error
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+ if err != nil {
+ return err
+ }
+ if c.All {
+ // We don't have a great way to get only the running containers, so we get them all and then
+ // check their status, because checkpoint considers checkpointing a stopped container an error.
+ var runningIds []string
+ for _, id := range ids {
+ ctr, err := r.LookupContainer(id)
+ if err != nil {
+ return err
+ }
+ if ctr.state.State == libpod.ContainerStateRunning {
+ runningIds = append(runningIds, id)
+ }
+ }
+ ids = runningIds
+ }
+
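+ // Checkpoint each container individually; report failures but keep
+ // processing the remaining containers.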
+ for _, id := range ids {
+ if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, options.Keep, options.KeepRunning, options.TCPEstablished); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to checkpoint container %v", id)
+ } else {
+ fmt.Println(id)
+ }
+ }
+ return lastError
+}
+
+// Restore one or more containers
+func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+ var lastError error
+ ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+ if err != nil {
+ return err
+ }
+ if c.All {
+ // We don't have a great way to get only the exited containers, so we get them all and then
+ // check their status, because restore considers restoring a running container an error.
+ var exitedIDs []string
+ for _, id := range ids {
+ ctr, err := r.LookupContainer(id)
+ if err != nil {
+ return err
+ }
+ if ctr.state.State != libpod.ContainerStateRunning {
+ exitedIDs = append(exitedIDs, id)
+ }
+ }
+ ids = exitedIDs
+ }
+
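+ // Restore each container individually; report failures but keep processing
+ // the remaining containers.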
+ for _, id := range ids {
+ if _, err := iopodman.ContainerRestore().Call(r.Conn, id, options.Keep, options.TCPEstablished); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "failed to restore container %v", id)
+ } else {
+ fmt.Println(id)
+ }
+ }
+ return lastError
+}