Diffstat (limited to 'pkg/adapter')
-rw-r--r--  pkg/adapter/checkpoint_restore.go  151
-rw-r--r--  pkg/adapter/client.go               90
-rw-r--r--  pkg/adapter/client_unix.go          16
-rw-r--r--  pkg/adapter/client_windows.go       15
-rw-r--r--  pkg/adapter/containers.go          260
-rw-r--r--  pkg/adapter/containers_remote.go   328
-rw-r--r--  pkg/adapter/errors.go                8
-rw-r--r--  pkg/adapter/info_remote.go          14
-rw-r--r--  pkg/adapter/pods.go                321
-rw-r--r--  pkg/adapter/pods_remote.go          30
-rw-r--r--  pkg/adapter/runtime.go              49
-rw-r--r--  pkg/adapter/runtime_remote.go       81
-rw-r--r--  pkg/adapter/sigproxy_linux.go        6
-rw-r--r--  pkg/adapter/terminal.go             23
-rw-r--r--  pkg/adapter/terminal_linux.go       48
15 files changed, 1205 insertions, 235 deletions
diff --git a/pkg/adapter/checkpoint_restore.go b/pkg/adapter/checkpoint_restore.go
new file mode 100644
index 000000000..1cac86d12
--- /dev/null
+++ b/pkg/adapter/checkpoint_restore.go
@@ -0,0 +1,151 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/errorhandling"
+ "github.com/containers/storage/pkg/archive"
+ jsoniter "github.com/json-iterator/go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Checkpoint/restore-related functions are prefixed with 'cr'
+
+// crImportFromJSON imports the JSON files stored in the exported
+// checkpoint tarball
+func crImportFromJSON(filePath string, v interface{}) error {
+ jsonFile, err := os.Open(filePath)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open container definition %s for restore", filePath)
+ }
+ defer errorhandling.CloseQuiet(jsonFile)
+
+ content, err := ioutil.ReadAll(jsonFile)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to read container definition %s for restore", filePath)
+ }
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ if err = json.Unmarshal(content, v); err != nil {
+ return errors.Wrapf(err, "Failed to unmarshal container definition %s for restore", filePath)
+ }
+
+ return nil
+}
+
+// crImportCheckpoint is the function which imports the information
+// from the checkpoint tarball and re-creates the container from that information
+func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input string, name string) ([]*libpod.Container, error) {
+ // First get the container definition from the
+ // tarball to a temporary directory
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ }
+ defer errorhandling.CloseQuiet(archiveFile)
+ options := &archive.TarOptions{
+ // Here we only need the files config.dump and spec.dump
+ ExcludePatterns: []string{
+ "checkpoint",
+ "artifacts",
+ "ctr.log",
+ "rootfs-diff.tar",
+ "network.status",
+ },
+ }
+ dir, err := ioutil.TempDir("", "checkpoint")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := os.RemoveAll(dir); err != nil {
+ logrus.Errorf("could not recursively remove %s: %q", dir, err)
+ }
+ }()
+ err = archive.Untar(archiveFile, dir, options)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ }
+
+ // Load spec.dump from temporary directory
+ dumpSpec := new(spec.Spec)
+ if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil {
+ return nil, err
+ }
+
+ // Load config.dump from temporary directory
+ config := new(libpod.ContainerConfig)
+ if err = crImportFromJSON(filepath.Join(dir, "config.dump"), config); err != nil {
+ return nil, err
+ }
+
+ // This should not happen as checkpoints with these options are not exported.
+ if (len(config.Dependencies) > 0) || (len(config.NamedVolumes) > 0) {
+ return nil, errors.Errorf("Cannot import checkpoints of containers with named volumes or dependencies")
+ }
+
+ ctrID := config.ID
+ newName := false
+
+ // Check if the restored container gets a new name
+ if name != "" {
+ config.ID = ""
+ config.Name = name
+ newName = true
+ }
+
+ ctrName := config.Name
+
+ // The code to load the images is copied from create.go
+ // In create.go this is only set if '--quiet' is not specified.
+ writer := os.Stderr
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = runtime.ImageRuntime().New(ctx, config.RootfsImageName, rtc.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, false, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now create a new container from the just loaded information
+ container, err := runtime.RestoreContainer(ctx, dumpSpec, config)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []*libpod.Container
+ if container == nil {
+ return nil, nil
+ }
+
+ containerConfig := container.Config()
+ if containerConfig.Name != ctrName {
+ return nil, errors.Errorf("Name of restored container (%s) does not match requested name (%s)", containerConfig.Name, ctrName)
+ }
+
+ if !newName {
+ // Only check ID for a restore with the same name.
+ // Using -n to request a new name for the restored container will also create a new ID
+ if containerConfig.ID != ctrID {
+ return nil, errors.Errorf("ID of restored container (%s) does not match requested ID (%s)", containerConfig.ID, ctrID)
+ }
+ }
+
+ // Check if the ExitCommand points to the correct container ID
+ if containerConfig.ExitCommand[len(containerConfig.ExitCommand)-1] != containerConfig.ID {
+ return nil, errors.Errorf("'ExitCommandID' uses ID %s instead of container ID %s", containerConfig.ExitCommand[len(containerConfig.ExitCommand)-1], containerConfig.ID)
+ }
+
+ containers = append(containers, container)
+ return containers, nil
+}
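The caller for this new helper lands later in this diff: the Restore change in containers.go passes c.Import and c.Name straight through (containers, err = crImportCheckpoint(ctx, r.Runtime, c.Import, c.Name)). A minimal sketch of that flow, assuming a runtime obtained through the usual adapter plumbing:

// Sketch only, package adapter: re-create containers from an exported
// checkpoint archive before the actual CRIU restore runs.
func restoreFromArchive(ctx context.Context, rt *libpod.Runtime, archive string) error {
	// Unpacks spec.dump/config.dump, pulls the rootfs image if missing,
	// and re-creates the container records in the runtime.
	ctrs, err := crImportCheckpoint(ctx, rt, archive, "")
	if err != nil {
		return err
	}
	for _, ctr := range ctrs {
		logrus.Debugf("re-created container %s from %s", ctr.ID(), archive)
	}
	return nil
}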
diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go
index 01914834f..694d9f961 100644
--- a/pkg/adapter/client.go
+++ b/pkg/adapter/client.go
@@ -6,42 +6,58 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/cmd/podman/remoteclientconfig"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/varlink/go/varlink"
)
var remoteEndpoint *Endpoint
func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) {
- if remoteEndpoint == nil {
- remoteEndpoint = &Endpoint{Unknown, ""}
- } else {
- return remoteEndpoint, nil
+ remoteConfigConnections, err := remoteclientconfig.ReadRemoteConfig(r.config)
+ if err != nil && errors.Cause(err) != remoteclientconfig.ErrNoConfigationFile {
+ return nil, err
}
-
- // I'm leaving this here for now as a document of the birdge format. It can be removed later once the bridge
- // function is more flushed out.
- // bridge := `ssh -T root@192.168.122.1 "/usr/bin/varlink -A '/usr/bin/podman varlink \$VARLINK_ADDRESS' bridge"`
- if len(r.cmd.RemoteHost) > 0 {
- // The user has provided a remote host endpoint
+ // If the user defines the PODMAN_VARLINK_BRIDGE env variable,
+ // we use that as passed.
+ if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
+ logrus.Debug("creating a varlink bridge based on env variable")
+ remoteEndpoint, err = newBridgeConnection(bridge, nil, r.cmd.LogLevel)
+ // if the PODMAN_VARLINK_ADDRESS environment variable is defined,
+ // we use that as passed
+ } else if address := os.Getenv("PODMAN_VARLINK_ADDRESS"); address != "" {
+ logrus.Debug("creating a varlink address based on env variable: %s", address)
+ remoteEndpoint, err = newSocketConnection(address)
+ // if the user provides a remote host, we use it to configure a bridge connection
+ } else if len(r.cmd.RemoteHost) > 0 {
+ logrus.Debug("creating a varlink bridge based on user input")
if len(r.cmd.RemoteUserName) < 1 {
return nil, errors.New("you must provide a username when providing a remote host name")
}
- remoteEndpoint.Type = BridgeConnection
- remoteEndpoint.Connection = fmt.Sprintf(
- `ssh -T %s@%s /usr/bin/varlink -A \'/usr/bin/podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`,
- r.cmd.RemoteUserName, r.cmd.RemoteHost, r.cmd.LogLevel)
-
- } else if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
- remoteEndpoint.Type = BridgeConnection
- remoteEndpoint.Connection = bridge
- } else {
- address := os.Getenv("PODMAN_VARLINK_ADDRESS")
- if address == "" {
- address = DefaultAddress
+ rc := remoteclientconfig.RemoteConnection{r.cmd.RemoteHost, r.cmd.RemoteUserName, false}
+ remoteEndpoint, err = newBridgeConnection("", &rc, r.cmd.LogLevel)
+ // if the user has a config file with connections in it
+ } else if remoteConfigConnections != nil && len(remoteConfigConnections.Connections) > 0 {
+ logrus.Debug("creating a varlink bridge based configuration file")
+ var rc *remoteclientconfig.RemoteConnection
+ if len(r.cmd.ConnectionName) > 0 {
+ rc, err = remoteConfigConnections.GetRemoteConnection(r.cmd.ConnectionName)
+ } else {
+ rc, err = remoteConfigConnections.GetDefault()
}
- remoteEndpoint.Type = DirectConnection
- remoteEndpoint.Connection = address
+ if err != nil {
+ return nil, err
+ }
+ if len(rc.Username) < 1 {
+ logrus.Debugf("Connection has no username, using current user %q", r.cmd.RemoteUserName)
+ rc.Username = r.cmd.RemoteUserName
+ }
+ remoteEndpoint, err = newBridgeConnection("", rc, r.cmd.LogLevel)
+ // last resort is to make a socket connection with the default varlink address for root user
+ } else {
+ logrus.Debug("creating a varlink address based default root address")
+ remoteEndpoint, err = newSocketConnection(DefaultAddress)
}
return
}
@@ -72,3 +88,29 @@ func (r RemoteRuntime) RefreshConnection() error {
r.Conn = newConn
return nil
}
+
+// newSocketConnection returns an endpoint for a uds based connection
+func newSocketConnection(address string) (*Endpoint, error) {
+ endpoint := Endpoint{
+ Type: DirectConnection,
+ Connection: address,
+ }
+ return &endpoint, nil
+}
+
+// newBridgeConnection creates a bridge type endpoint with username, destination, and log-level
+func newBridgeConnection(formattedBridge string, remoteConn *remoteclientconfig.RemoteConnection, logLevel string) (*Endpoint, error) {
+ endpoint := Endpoint{
+ Type: BridgeConnection,
+ }
+
+ if len(formattedBridge) < 1 && remoteConn == nil {
+ return nil, errors.New("bridge connections must either be created by string or remoteconnection")
+ }
+ if len(formattedBridge) > 0 {
+ endpoint.Connection = formattedBridge
+ return &endpoint, nil
+ }
+ endpoint.Connection = formatDefaultBridge(remoteConn, logLevel)
+ return &endpoint, nil
+}
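The rewritten RemoteEndpoint resolves the connection in a fixed order: PODMAN_VARLINK_BRIDGE, then PODMAN_VARLINK_ADDRESS, then --remote-host/--username, then the connections file (a named connection or its default), and finally the default root socket. A short sketch of the two environment overrides (endpoint values here are assumptions for illustration):

// Sketch: the env variables short-circuit every other source.
func exampleEndpointResolution(rr RemoteRuntime) {
	// Highest priority: a fully formatted bridge command.
	os.Setenv("PODMAN_VARLINK_BRIDGE",
		`ssh -T core@host.example.com -- /usr/bin/varlink -A '/usr/bin/podman varlink $VARLINK_ADDRESS' bridge`)
	ep, _ := rr.RemoteEndpoint() // ep.Type == BridgeConnection

	// Next in line: a direct socket address.
	os.Unsetenv("PODMAN_VARLINK_BRIDGE")
	os.Setenv("PODMAN_VARLINK_ADDRESS", "unix:/run/podman/io.podman")
	ep, _ = rr.RemoteEndpoint() // ep.Type == DirectConnection
	_ = ep
}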
diff --git a/pkg/adapter/client_unix.go b/pkg/adapter/client_unix.go
new file mode 100644
index 000000000..4781acd06
--- /dev/null
+++ b/pkg/adapter/client_unix.go
@@ -0,0 +1,16 @@
+// +build linux darwin
+// +build remoteclient
+
+package adapter
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/remoteclientconfig"
+)
+
+func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string {
+ return fmt.Sprintf(
+ `ssh -T %s@%s -- /usr/bin/varlink -A \'/usr/bin/podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`,
+ remoteConn.Username, remoteConn.Destination, logLevel)
+}
diff --git a/pkg/adapter/client_windows.go b/pkg/adapter/client_windows.go
new file mode 100644
index 000000000..31e5d9830
--- /dev/null
+++ b/pkg/adapter/client_windows.go
@@ -0,0 +1,15 @@
+// +build remoteclient
+
+package adapter
+
+import (
+ "fmt"
+
+ "github.com/containers/libpod/cmd/podman/remoteclientconfig"
+)
+
+func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string {
+ return fmt.Sprintf(
+ `ssh -T %s@%s -- /usr/bin/varlink -A '/usr/bin/podman --log-level=%s varlink $VARLINK_ADDRESS' bridge`,
+ remoteConn.Username, remoteConn.Destination, logLevel)
+}
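The two build-tagged variants of formatDefaultBridge differ only in quoting: the linux/darwin version escapes the inner quotes and $VARLINK_ADDRESS so they survive the local POSIX shell, while the Windows version passes them through literally. A sketch with assumed connection values:

// Sketch: same bridge command, platform-specific escaping.
rc := remoteclientconfig.RemoteConnection{Destination: "host.example.com", Username: "core"}
bridge := formatDefaultBridge(&rc, "error")
// linux/darwin: ssh -T core@host.example.com -- /usr/bin/varlink -A \'...varlink \\\$VARLINK_ADDRESS\' bridge
// windows:      ssh -T core@host.example.com -- /usr/bin/varlink -A '...varlink $VARLINK_ADDRESS' bridge
_ = bridge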
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index ff7b6377a..faaef3e60 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -6,6 +6,7 @@ import (
"bufio"
"context"
"fmt"
+ "io"
"io/ioutil"
"os"
"path/filepath"
@@ -15,9 +16,15 @@ import (
"syscall"
"time"
+ "github.com/containers/buildah"
+ "github.com/containers/image/manifest"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/cmd/podman/shared/parse"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
"github.com/containers/libpod/pkg/systemdgen"
"github.com/containers/psgo"
@@ -62,7 +69,7 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) {
var timeout *uint
if cli.Flags().Changed("timeout") || cli.Flags().Changed("time") {
- t := uint(cli.Timeout)
+ t := cli.Timeout
timeout = &t
}
@@ -88,14 +95,14 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
}
pool.Add(shared.Job{
- c.ID(),
- func() error {
+ ID: c.ID(),
+ Fn: func() error {
err := c.StopWithTimeout(*timeout)
if err != nil {
- if errors.Cause(err) == libpod.ErrCtrStopped {
+ if errors.Cause(err) == define.ErrCtrStopped {
logrus.Debugf("Container %s is already stopped", c.ID())
return nil
- } else if cli.All && errors.Cause(err) == libpod.ErrCtrStateInvalid {
+ } else if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid {
logrus.Debugf("Container %s is not running, could not stop", c.ID())
return nil
}
@@ -127,8 +134,8 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
c := c
pool.Add(shared.Job{
- c.ID(),
- func() error {
+ ID: c.ID(),
+ Fn: func() error {
return c.Kill(uint(signal))
},
})
@@ -156,12 +163,12 @@ func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitVa
ctr := c
pool.Add(shared.Job{
- ctr.ID(),
- func() error {
+ ID: ctr.ID(),
+ Fn: func() error {
err := ctr.Init(ctx)
if err != nil {
// If we're initializing all containers, ignore invalid state errors
- if cli.All && errors.Cause(err) == libpod.ErrCtrStateInvalid {
+ if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid {
return nil
}
return err
@@ -186,12 +193,18 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
}
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
+ if cli.Storage {
+ for _, ctr := range cli.InputArgs {
+ if err := r.RemoveStorageContainer(ctr, cli.Force); err != nil {
+ failures[ctr] = err
+ }
+ ok = append(ok, ctr)
+ }
+ return ok, failures, nil
+ }
+
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
- // Force may be used to remove containers no longer found in the database
- if cli.Force && len(cli.InputArgs) > 0 && errors.Cause(err) == libpod.ErrNoSuchCtr {
- r.RemoveContainersFromStorage(cli.InputArgs)
- }
return ok, failures, err
}
@@ -200,8 +213,8 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
c := c
pool.Add(shared.Job{
- c.ID(),
- func() error {
+ ID: c.ID(),
+ Fn: func() error {
err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
if err != nil {
logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
@@ -231,7 +244,7 @@ func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig
logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error())
continue
}
- if state == libpod.ContainerStateRunning {
+ if state == define.ContainerStateRunning {
logrus.Debugf("Error umounting container %s, is running", ctr.ID())
continue
}
@@ -272,13 +285,14 @@ func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.Wait
}
// Log logs one or more containers
-func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
+func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
+
var wg sync.WaitGroup
options.WaitGroup = &wg
if len(c.InputArgs) > 1 {
options.Multi = true
}
- logChannel := make(chan *libpod.LogLine, int(c.Tail)*len(c.InputArgs)+1)
+ logChannel := make(chan *logs.LogLine, int(c.Tail)*len(c.InputArgs)+1)
containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
if err != nil {
return err
@@ -328,7 +342,7 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
if err := ctr.Start(ctx, c.IsSet("pod")); err != nil {
// This means the command did not exist
exitCode = 127
- if strings.Index(err.Error(), "permission denied") > -1 {
+ if strings.Contains(err.Error(), "permission denied") {
exitCode = 126
}
return exitCode, err
@@ -366,22 +380,32 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
case "stdin":
inputStream = os.Stdin
default:
- return exitCode, errors.Wrapf(libpod.ErrInvalidArg, "invalid stream %q for --attach - must be one of stdin, stdout, or stderr", stream)
+ return exitCode, errors.Wrapf(define.ErrInvalidArg, "invalid stream %q for --attach - must be one of stdin, stdout, or stderr", stream)
}
}
}
+
+ config, err := r.Runtime.GetConfig()
+ if err != nil {
+ return exitCode, err
+ }
+ detachKeys := c.String("detach-keys")
+ if detachKeys == "" {
+ detachKeys = config.DetachKeys
+ }
+
// if the container was created as part of a pod, also start its dependencies, if any.
- if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
+ if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, detachKeys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
- if errors.Cause(err) == libpod.ErrDetach {
+ if errors.Cause(err) == define.ErrDetach {
exitCode = 0
return exitCode, nil
}
// This means the command did not exist
exitCode = 127
- if strings.Index(err.Error(), "permission denied") > -1 {
+ if strings.Contains(err.Error(), "permission denied") {
exitCode = 126
}
if c.IsSet("rm") {
@@ -393,13 +417,9 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
if ecode, err := ctr.Wait(); err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ if errors.Cause(err) == define.ErrNoSuchCtr {
// The container may have been removed
// Go looking for an exit file
- config, err := r.Runtime.GetConfig()
- if err != nil {
- return exitCode, err
- }
ctrExitCode, err := ReadExitFile(config.TmpDir, ctr.ID())
if err != nil {
logrus.Errorf("Cannot get exit code: %v", err)
@@ -477,7 +497,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if err != nil {
return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
}
- if conState != libpod.ContainerStateRunning {
+ if conState != define.ContainerStateRunning {
return errors.Errorf("you can only attach to running containers")
}
@@ -486,19 +506,29 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
inputStream = nil
}
// If the container is in a pod, also set to recursively start dependencies
- if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != libpod.ErrDetach {
+ if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach {
return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
}
return nil
}
// Checkpoint one or more containers
-func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
var (
containers []*libpod.Container
err, lastError error
)
+ options := libpod.ContainerCheckpointOptions{
+ Keep: c.Keep,
+ KeepRunning: c.LeaveRunning,
+ TCPEstablished: c.TcpEstablished,
+ TargetFile: c.Export,
+ IgnoreRootfs: c.IgnoreRootfs,
+ }
+ if c.Export == "" && c.IgnoreRootfs {
+ return errors.Errorf("--ignore-rootfs can only be used with --export")
+ }
if c.All {
containers, err = r.Runtime.GetRunningContainers()
} else {
@@ -522,19 +552,29 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.
}
// Restore one or more containers
-func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
var (
containers []*libpod.Container
err, lastError error
filterFuncs []libpod.ContainerFilter
)
+ options := libpod.ContainerCheckpointOptions{
+ Keep: c.Keep,
+ TCPEstablished: c.TcpEstablished,
+ TargetFile: c.Import,
+ Name: c.Name,
+ IgnoreRootfs: c.IgnoreRootfs,
+ }
+
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
- return state == libpod.ContainerStateExited
+ return state == define.ContainerStateExited
})
- if c.All {
+ if c.Import != "" {
+ containers, err = crImportCheckpoint(ctx, r.Runtime, c.Import, c.Name)
+ } else if c.All {
containers, err = r.GetContainers(filterFuncs...)
} else {
containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
@@ -587,7 +627,7 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
return exitCode, errors.Wrapf(err, "unable to get container state")
}
- ctrRunning := ctrState == libpod.ContainerStateRunning
+ ctrRunning := ctrState == define.ContainerStateRunning
if c.Attach {
inputStream := os.Stdin
@@ -598,7 +638,7 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
// attach to the container and also start it not already running
// If the container is in a pod, also set to recursively start dependencies
err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, c.DetachKeys, sigProxy, !ctrRunning, ctr.PodID() != "")
- if errors.Cause(err) == libpod.ErrDetach {
+ if errors.Cause(err) == define.ErrDetach {
// User manually detached
// Exit cleanly immediately
exitCode = 0
@@ -614,7 +654,7 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
}
if ecode, err := ctr.Wait(); err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
+ if errors.Cause(err) == define.ErrNoSuchCtr {
// The container may have been removed
// Go looking for an exit file
rtc, err := r.GetConfig()
@@ -713,7 +753,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
var filterFuncs []libpod.ContainerFilter
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
- return state == libpod.ContainerStatePaused
+ return state == define.ContainerStatePaused
})
ctrs, err = r.GetContainers(filterFuncs...)
} else {
@@ -884,13 +924,86 @@ func (r *LocalRuntime) execPS(c *libpod.Container, args []string) ([]string, err
}()
cmd := append([]string{"ps"}, args...)
- if err := c.Exec(false, false, []string{}, cmd, "", "", streams, 0); err != nil {
+ ec, err := c.Exec(false, false, []string{}, cmd, "", "", streams, 0, nil, "")
+ if err != nil {
return nil, err
+ } else if ec != 0 {
+ return nil, errors.Errorf("Runtime failed with exit status: %d and output: %s", ec, strings.Join(psOutput, " "))
}
return psOutput, nil
}
+// ExecContainer executes a command in the container
+func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) {
+ var (
+ ctr *Container
+ err error
+ cmd []string
+ )
+ // default invalid command exit code
+ ec := 125
+
+ if cli.Latest {
+ if ctr, err = r.GetLatestContainer(); err != nil {
+ return ec, err
+ }
+ cmd = cli.InputArgs[0:]
+ } else {
+ if ctr, err = r.LookupContainer(cli.InputArgs[0]); err != nil {
+ return ec, err
+ }
+ cmd = cli.InputArgs[1:]
+ }
+
+ if cli.PreserveFDs > 0 {
+ entries, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return ec, errors.Wrapf(err, "unable to read /proc/self/fd")
+ }
+
+ m := make(map[int]bool)
+ for _, e := range entries {
+ i, err := strconv.Atoi(e.Name())
+ if err != nil {
+ return ec, errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ }
+ m[i] = true
+ }
+
+ for i := 3; i < 3+cli.PreserveFDs; i++ {
+ if _, found := m[i]; !found {
+ return ec, errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
+ }
+ }
+ }
+
+ // Validate given environment variables
+ env := map[string]string{}
+ if err := parse.ReadKVStrings(env, []string{}, cli.Env); err != nil {
+ return ec, errors.Wrapf(err, "unable to process environment variables")
+ }
+
+ // Build env slice of key=value strings for Exec
+ envs := []string{}
+ for k, v := range env {
+ envs = append(envs, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ streams := new(libpod.AttachStreams)
+ streams.OutputStream = os.Stdout
+ streams.ErrorStream = os.Stderr
+ if cli.Interactive {
+ streams.InputStream = os.Stdin
+ streams.AttachInput = true
+ }
+ streams.AttachOutput = true
+ streams.AttachError = true
+
+ ec, err = ExecAttachCtr(ctx, ctr.Container, cli.Tty, cli.Privileged, envs, cmd, cli.User, cli.Workdir, streams, cli.PreserveFDs, cli.DetachKeys)
+ return define.TranslateExecErrorToExitCode(ec, err), err
+}
+
// Prune removes stopped containers
func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
var (
@@ -910,7 +1023,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
if c.PodID() != "" {
return false
}
- if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
+ if state == define.ContainerStateStopped || state == define.ContainerStateExited {
return true
}
return false
@@ -1001,7 +1114,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
//Convert libpod containers to adapter Containers
for _, con := range containers {
- if state, _ := con.State(); state != libpod.ContainerStateRunning {
+ if state, _ := con.State(); state != define.ContainerStateRunning {
continue
}
portContainers = append(portContainers, &Container{con})
@@ -1017,16 +1130,75 @@ func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (stri
}
timeout := int(ctr.StopTimeout())
if c.StopTimeout >= 0 {
- timeout = int(c.StopTimeout)
+ timeout = c.StopTimeout
}
name := ctr.ID()
if c.Name {
name = ctr.Name()
}
- return systemdgen.CreateSystemdUnitAsString(name, ctr.ID(), c.RestartPolicy, ctr.Config().StaticDir, timeout)
+
+ config := ctr.Config()
+ conmonPidFile := config.ConmonPidFile
+ if conmonPidFile == "" {
+ return "", errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag")
+ }
+
+ return systemdgen.CreateSystemdUnitAsString(name, ctr.ID(), c.RestartPolicy, conmonPidFile, timeout)
}
// GetNamespaces returns namespace information about a container for PS
func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared.Namespace {
return shared.GetNamespaces(container.Pid)
}
+
+// Commit creates a local image from a container
+func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) {
+ var (
+ writer io.Writer
+ mimeType string
+ )
+ switch c.Format {
+ case "oci":
+ mimeType = buildah.OCIv1ImageManifest
+ if c.Flag("message").Changed {
+ return "", errors.Errorf("messages are only compatible with the docker image format (-f docker)")
+ }
+ case "docker":
+ mimeType = manifest.DockerV2Schema2MediaType
+ default:
+ return "", errors.Errorf("unrecognized image format %q", c.Format)
+ }
+ if !c.Quiet {
+ writer = os.Stderr
+ }
+ ctr, err := r.Runtime.LookupContainer(container)
+ if err != nil {
+ return "", errors.Wrapf(err, "error looking up container %q", container)
+ }
+
+ rtc, err := r.Runtime.GetConfig()
+ if err != nil {
+ return "", err
+ }
+
+ sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false)
+ coptions := buildah.CommitOptions{
+ SignaturePolicyPath: rtc.SignaturePolicyPath,
+ ReportWriter: writer,
+ SystemContext: sc,
+ PreferredManifestType: mimeType,
+ }
+ options := libpod.ContainerCommitOptions{
+ CommitOptions: coptions,
+ Pause: c.Pause,
+ IncludeVolumes: c.IncludeVolumes,
+ Message: c.Message,
+ Changes: c.Change,
+ Author: c.Author,
+ }
+ newImage, err := ctr.Commit(ctx, imageName, options)
+ if err != nil {
+ return "", err
+ }
+ return newImage.ID(), nil
+}
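Several paths above (Run, Start, and the new ExecContainer) normalize errors into shell-style exit codes: 125 for a generic failure, 126 when the command was found but could not run ("permission denied"), 127 when the command did not exist, and 0 for a manual detach. A condensed sketch of that convention; the real mapping is spread across those functions and define.TranslateExecErrorToExitCode:

// Sketch: the exit-code convention used by the attach/start paths.
func startExitCode(err error) int {
	switch {
	case err == nil:
		return 0
	case errors.Cause(err) == define.ErrDetach:
		return 0 // the user detached manually; not a failure
	case strings.Contains(err.Error(), "permission denied"):
		return 126 // command found but not executable
	default:
		return 127 // command did not exist
	}
}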
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index c34495b3d..5a26f537f 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -3,6 +3,7 @@
package adapter
import (
+ "bufio"
"context"
"encoding/json"
"fmt"
@@ -14,9 +15,11 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/cmd/podman/shared/parse"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/varlinkapi/virtwriter"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/term"
@@ -29,12 +32,12 @@ import (
)
// Inspect returns an inspect struct from varlink
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
+func (c *Container) Inspect(size bool) (*libpod.InspectContainerData, error) {
reply, err := iopodman.ContainerInspectData().Call(c.Runtime.Conn, c.ID(), size)
if err != nil {
return nil, err
}
- data := inspect.ContainerInspectData{}
+ data := libpod.InspectContainerData{}
if err := json.Unmarshal([]byte(reply), &data); err != nil {
return nil, err
}
@@ -240,11 +243,11 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
for _, id := range ids {
if _, err := iopodman.StopContainer().Call(r.Conn, id, int64(cli.Timeout)); err != nil {
transError := TranslateError(err)
- if errors.Cause(transError) == libpod.ErrCtrStopped {
+ if errors.Cause(transError) == define.ErrCtrStopped {
ok = append(ok, id)
continue
}
- if errors.Cause(transError) == libpod.ErrCtrStateInvalid && cli.All {
+ if errors.Cause(transError) == define.ErrCtrStateInvalid && cli.All {
ok = append(ok, id)
continue
}
@@ -411,8 +414,8 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
return bcs, nil
}
-// Logs one or more containers over a varlink connection
-func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
+// Log one or more containers over a varlink connection
+func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
// GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
@@ -434,7 +437,7 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions)
if err != nil {
return errors.Wrapf(err, "unable to parse time of log %s", log.Time)
}
- logLine := libpod.LogLine{
+ logLine := logs.LogLine{
Device: log.Device,
ParseLogType: log.ParseLogType,
Time: lTime,
@@ -477,7 +480,7 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
func ReadExitFile(runtimeTmp, ctrID string) (int, error) {
- return 0, libpod.ErrNotImplemented
+ return 0, define.ErrNotImplemented
}
// Ps lists containers based on criteria from user
@@ -492,6 +495,7 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
NoTrunc: &c.NoTrunct,
Pod: &c.Pod,
Quiet: &c.Quiet,
+ Size: &c.Size,
Sort: &c.Sort,
Sync: &c.Sync,
}
@@ -516,7 +520,7 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
RootFsSize: ctr.RootFsSize,
RwSize: ctr.RwSize,
}
- state, err := libpod.StringToContainerStatus(ctr.State)
+ state, err := define.StringToContainerStatus(ctr.State)
if err != nil {
return nil, err
}
@@ -552,92 +556,13 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
return psContainers, nil
}
-func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool, detachKeys string) (chan error, error) {
- var (
- oldTermState *term.State
- )
- errChan := make(chan error)
- spec, err := r.Spec(cid)
- if err != nil {
- return nil, err
- }
- resize := make(chan remotecommand.TerminalSize, 5)
- haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
-
- // Check if we are attached to a terminal. If we are, generate resize
- // events, and set the terminal to raw mode
- if haveTerminal && spec.Process.Terminal {
- logrus.Debugf("Handling terminal attach")
-
- subCtx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- resizeTty(subCtx, resize)
- oldTermState, err = term.SaveState(os.Stdin.Fd())
- if err != nil {
- return nil, errors.Wrapf(err, "unable to save terminal state")
- }
-
- logrus.SetFormatter(&RawTtyFormatter{})
- term.SetRawTerminal(os.Stdin.Fd())
-
- }
- // TODO add detach keys support
- _, err = iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, detachKeys, start)
- if err != nil {
- restoreTerminal(oldTermState)
- return nil, err
- }
-
- // These are the varlink sockets
- reader := r.Conn.Reader
- writer := r.Conn.Writer
-
- // These are the special writers that encode input from the client.
- varlinkStdinWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.ToStdin)
- varlinkResizeWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.TerminalResize)
-
- go func() {
- // Read from the wire and direct to stdout or stderr
- err := virtwriter.Reader(reader, stdout, os.Stderr, nil, nil)
- defer restoreTerminal(oldTermState)
- errChan <- err
- }()
-
- go func() {
- for termResize := range resize {
- b, err := json.Marshal(termResize)
- if err != nil {
- defer restoreTerminal(oldTermState)
- errChan <- err
- }
- _, err = varlinkResizeWriter.Write(b)
- if err != nil {
- defer restoreTerminal(oldTermState)
- errChan <- err
- }
- }
- }()
-
- // Takes stdinput and sends it over the wire after being encoded
- go func() {
- if _, err := io.Copy(varlinkStdinWriter, stdin); err != nil {
- defer restoreTerminal(oldTermState)
- errChan <- err
- }
-
- }()
- return errChan, nil
-
-}
-
// Attach to a remote terminal
func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error {
ctr, err := r.LookupContainer(c.InputArgs[0])
if err != nil {
return nil
}
- if ctr.state.State != libpod.ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
return errors.New("you can only attach to running containers")
}
inputStream := os.Stdin
@@ -655,7 +580,14 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
}
// Checkpoint one or more containers
-func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
+ if c.Export != "" {
+ return errors.New("the remote client does not support exporting checkpoints")
+ }
+ if c.IgnoreRootfs {
+ return errors.New("the remote client does not support --ignore-rootfs")
+ }
+
var lastError error
ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
if err != nil {
@@ -670,7 +602,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.
if err != nil {
return err
}
- if ctr.state.State == libpod.ContainerStateRunning {
+ if ctr.state.State == define.ContainerStateRunning {
runningIds = append(runningIds, id)
}
}
@@ -678,7 +610,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.
}
for _, id := range ids {
- if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, options.Keep, options.KeepRunning, options.TCPEstablished); err != nil {
+ if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, c.Keep, c.LeaveRunning, c.TcpEstablished); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
@@ -691,7 +623,14 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.
}
// Restore one or more containers
-func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
+ if c.Import != "" {
+ return errors.New("the remote client does not support importing checkpoints")
+ }
+ if c.IgnoreRootfs {
+ return errors.New("the remote client does not support --ignore-rootfs")
+ }
+
var lastError error
ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
if err != nil {
@@ -706,7 +645,7 @@ func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.Contai
if err != nil {
return err
}
- if ctr.state.State != libpod.ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
exitedIDs = append(exitedIDs, id)
}
}
@@ -714,7 +653,7 @@ func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.Contai
}
for _, id := range ids {
- if _, err := iopodman.ContainerRestore().Call(r.Conn, id, options.Keep, options.TCPEstablished); err != nil {
+ if _, err := iopodman.ContainerRestore().Call(r.Conn, id, c.Keep, c.TcpEstablished); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
@@ -771,6 +710,49 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
return exitCode, finalErr
}
+func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool, detachKeys string) (chan error, error) {
+ var (
+ oldTermState *term.State
+ )
+ spec, err := r.Spec(cid)
+ if err != nil {
+ return nil, err
+ }
+ resize := make(chan remotecommand.TerminalSize, 5)
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && spec.Process.Terminal {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ defer restoreTerminal(oldTermState)
+
+ logrus.SetFormatter(&RawTtyFormatter{})
+ term.SetRawTerminal(os.Stdin.Fd())
+ }
+
+ reply, err := iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, detachKeys, start)
+ if err != nil {
+ restoreTerminal(oldTermState)
+ return nil, err
+ }
+
+ // See if the server accepts the upgraded connection or returns an error
+ _, err = reply()
+
+ if err != nil {
+ restoreTerminal(oldTermState)
+ return nil, err
+ }
+
+ errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, stdin, stdout, oldTermState, resize, nil)
+ return errChan, nil
+}
+
// PauseContainers pauses container(s) based on CLI inputs.
func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) {
var (
@@ -781,7 +763,7 @@ func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.Pause
)
if cli.All {
- filters := []string{libpod.ContainerStateRunning.String()}
+ filters := []string{define.ContainerStateRunning.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@@ -818,7 +800,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
if cli.All {
- filters := []string{libpod.ContainerStatePaused.String()}
+ filters := []string{define.ContainerStatePaused.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@@ -857,7 +839,7 @@ func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues)
}
restartContainers = append(restartContainers, lastCtr)
} else if c.Running {
- containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+ containers, err = r.LookupContainersWithStatus([]string{define.ContainerStateRunning.String()})
if err != nil {
return nil, nil, err
}
@@ -925,7 +907,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
)
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
- filters := []string{libpod.ContainerStateExited.String()}
+ filters := []string{define.ContainerStateExited.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
if err != nil {
return ok, failures, err
@@ -958,7 +940,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
containers, err = r.GetContainersByContext(false, c.Latest, c.InputArgs)
} else {
// we need to only use running containers if all
- filters := []string{libpod.ContainerStateRunning.String()}
+ filters := []string{define.ContainerStateRunning.String()}
containers, err = r.LookupContainersWithStatus(filters)
}
if err != nil {
@@ -986,3 +968,147 @@ func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared
}
return &ns
}
+
+// Commit creates a local image from a container
+func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) {
+ var iid string
+ reply, err := iopodman.Commit().Send(r.Conn, varlink.More, container, imageName, c.Change, c.Author, c.Message, c.Pause, c.Format)
+ if err != nil {
+ return "", err
+ }
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return "", err
+ }
+ for _, line := range responses.Logs {
+ fmt.Fprintln(os.Stderr, line)
+ }
+ iid = responses.Id
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+ return iid, nil
+}
+
+// ExecContainer executes a command in the container
+func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) {
+ var (
+ oldTermState *term.State
+ ec int = define.ExecErrorCodeGeneric // default invalid command exit code
+ )
+ // Validate given environment variables
+ env := map[string]string{}
+ if err := parse.ReadKVStrings(env, []string{}, cli.Env); err != nil {
+ return -1, errors.Wrapf(err, "Exec unable to process environment variables")
+ }
+
+ // Build env slice of key=value strings for Exec
+ envs := []string{}
+ for k, v := range env {
+ envs = append(envs, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ resize := make(chan remotecommand.TerminalSize, 5)
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && cli.Tty {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ if err != nil {
+ return ec, err
+ }
+ defer cancel()
+ defer restoreTerminal(oldTermState)
+
+ logrus.SetFormatter(&RawTtyFormatter{})
+ term.SetRawTerminal(os.Stdin.Fd())
+ }
+
+ opts := iopodman.ExecOpts{
+ Name: cli.InputArgs[0],
+ Tty: cli.Tty,
+ Privileged: cli.Privileged,
+ Cmd: cli.InputArgs[1:],
+ User: &cli.User,
+ Workdir: &cli.Workdir,
+ Env: &envs,
+ DetachKeys: &cli.DetachKeys,
+ }
+
+ inputStream := os.Stdin
+ if !cli.Interactive {
+ inputStream = nil
+ }
+
+ reply, err := iopodman.ExecContainer().Send(r.Conn, varlink.Upgrade, opts)
+ if err != nil {
+ return ec, errors.Wrapf(err, "Exec failed to contact service for %s", cli.InputArgs)
+ }
+
+ _, err = reply()
+ if err != nil {
+ return ec, errors.Wrapf(err, "Exec operation failed for %s", cli.InputArgs)
+ }
+ ecChan := make(chan int, 1)
+ errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, inputStream, os.Stdout, oldTermState, resize, ecChan)
+
+ ec = <-ecChan
+ err = <-errChan
+
+ return ec, err
+}
+
+func configureVarlinkAttachStdio(reader *bufio.Reader, writer *bufio.Writer, stdin *os.File, stdout *os.File, oldTermState *term.State, resize chan remotecommand.TerminalSize, ecChan chan int) chan error {
+ errChan := make(chan error, 1)
+ // These are the special writers that encode input from the client.
+ varlinkStdinWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.ToStdin)
+ varlinkResizeWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.TerminalResize)
+
+ go func() {
+ // Read from the wire and direct to stdout or stderr
+ err := virtwriter.Reader(reader, stdout, os.Stderr, nil, nil, ecChan)
+ defer restoreTerminal(oldTermState)
+ sendGenericError(ecChan)
+ errChan <- err
+ }()
+
+ go func() {
+ for termResize := range resize {
+ b, err := json.Marshal(termResize)
+ if err != nil {
+ defer restoreTerminal(oldTermState)
+ sendGenericError(ecChan)
+ errChan <- err
+ }
+ _, err = varlinkResizeWriter.Write(b)
+ if err != nil {
+ defer restoreTerminal(oldTermState)
+ sendGenericError(ecChan)
+ errChan <- err
+ }
+ }
+ }()
+
+ if stdin != nil {
+ // Takes stdinput and sends it over the wire after being encoded
+ go func() {
+ if _, err := io.Copy(varlinkStdinWriter, stdin); err != nil {
+ defer restoreTerminal(oldTermState)
+ sendGenericError(ecChan)
+ errChan <- err
+ }
+
+ }()
+ }
+ return errChan
+}
+
+func sendGenericError(ecChan chan int) {
+ if ecChan != nil {
+ ecChan <- define.ExecErrorCodeGeneric
+ }
+}
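configureVarlinkAttachStdio is the piece now shared by Attach and ExecContainer: client input and terminal resizes are framed onto the single upgraded varlink connection with virtwriter (the ToStdin and TerminalResize writers), while a reader goroutine demultiplexes return traffic onto stdout/stderr and, for exec, reports the remote exit status on ecChan. A sketch of a caller, assuming an already upgraded connection:

// Sketch: wire local stdio into the multiplexed varlink stream.
func exampleAttachIO(conn *varlink.Connection) (int, error) {
	resize := make(chan remotecommand.TerminalSize, 5)
	ecChan := make(chan int, 1)
	errChan := configureVarlinkAttachStdio(
		conn.Reader, conn.Writer, os.Stdin, os.Stdout, nil, resize, ecChan)
	return <-ecChan, <-errChan // exit status, then the I/O error (if any)
}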
diff --git a/pkg/adapter/errors.go b/pkg/adapter/errors.go
index 7fbbabd93..ede3d4b1a 100644
--- a/pkg/adapter/errors.go
+++ b/pkg/adapter/errors.go
@@ -4,7 +4,7 @@ package adapter
import (
iopodman "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
)
@@ -21,11 +21,11 @@ func TranslateMapErrors(failures map[string]error) map[string]error {
func TranslateError(err error) error {
switch err.(type) {
case *iopodman.ContainerNotFound:
- return errors.Wrap(libpod.ErrNoSuchCtr, err.Error())
+ return errors.Wrap(define.ErrNoSuchCtr, err.Error())
case *iopodman.ErrCtrStopped:
- return errors.Wrap(libpod.ErrCtrStopped, err.Error())
+ return errors.Wrap(define.ErrCtrStopped, err.Error())
case *iopodman.InvalidState:
- return errors.Wrap(libpod.ErrCtrStateInvalid, err.Error())
+ return errors.Wrap(define.ErrCtrStateInvalid, err.Error())
}
return err
}
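TranslateError lets the remote code compare varlink error payloads against the same define sentinels the local runtime uses; the StopContainers loop in containers_remote.go is the canonical pattern. A minimal sketch:

// Sketch: treat "already stopped" from the remote end as success.
func stopIgnoringStopped(conn *varlink.Connection, id string) error {
	if _, err := iopodman.StopContainer().Call(conn, id, 10); err != nil {
		if errors.Cause(TranslateError(err)) == define.ErrCtrStopped {
			return nil // already stopped: not an error
		}
		return err
	}
	return nil
}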
diff --git a/pkg/adapter/info_remote.go b/pkg/adapter/info_remote.go
index 3b2d02a5a..3170e5b3d 100644
--- a/pkg/adapter/info_remote.go
+++ b/pkg/adapter/info_remote.go
@@ -4,16 +4,16 @@ package adapter
import (
"encoding/json"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
)
// Info returns information for the host system and its components
-func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
+func (r RemoteRuntime) Info() ([]define.InfoData, error) {
// TODO the varlink implementation for info should be updated to match the output for regular info
var (
- reply []libpod.InfoData
+ reply []define.InfoData
hostInfo map[string]interface{}
store map[string]interface{}
)
@@ -43,9 +43,9 @@ func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
insecureRegistries["registries"] = info.Insecure_registries
// Add everything to the reply
- reply = append(reply, libpod.InfoData{Type: "host", Data: hostInfo})
- reply = append(reply, libpod.InfoData{Type: "registries", Data: registries})
- reply = append(reply, libpod.InfoData{Type: "insecure registries", Data: insecureRegistries})
- reply = append(reply, libpod.InfoData{Type: "store", Data: store})
+ reply = append(reply, define.InfoData{Type: "host", Data: hostInfo})
+ reply = append(reply, define.InfoData{Type: "registries", Data: registries})
+ reply = append(reply, define.InfoData{Type: "insecure registries", Data: insecureRegistries})
+ reply = append(reply, define.InfoData{Type: "store", Data: store})
return reply, nil
}
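Each define.InfoData entry pairs a Type label with a free-form map, so consumers index into Data by key. A sketch (the "arch" key is an assumption for illustration):

// Sketch: pull one field out of the "host" info block.
func hostField(infos []define.InfoData, key string) interface{} {
	for _, i := range infos {
		if i.Type == "host" {
			return i.Data[key] // e.g. key = "arch"
		}
	}
	return nil
}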
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index bb7d9cce6..b9d7fcd9b 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -4,14 +4,33 @@ package adapter
import (
"context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
"strings"
+ "github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ ns "github.com/containers/libpod/pkg/namespaces"
+ createconfig "github.com/containers/libpod/pkg/spec"
+ "github.com/containers/storage"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ createDirectoryPermission = 0755
+ // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
+ createFilePermission = 0644
)
// PodContainerStats is struct containing an adapter Pod and a libpod
@@ -51,8 +70,9 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
for _, p := range pods {
p := p
- pool.Add(shared.Job{p.ID(),
- func() error {
+ pool.Add(shared.Job{
+ ID: p.ID(),
+ Fn: func() error {
err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force)
if err != nil {
logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error())
@@ -135,7 +155,7 @@ func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValue
for _, p := range pods {
stopped := true
- conErrs, stopErr := p.StopWithTimeout(ctx, true, int(timeout))
+ conErrs, stopErr := p.StopWithTimeout(ctx, true, timeout)
if stopErr != nil {
errs = append(errs, stopErr)
stopped = false
@@ -420,3 +440,298 @@ func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error)
}
return adapterPods, nil
}
+
+// PlayKubeYAML creates pods and containers from a kube YAML file
+func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
+ var (
+ containers []*libpod.Container
+ pod *libpod.Pod
+ podOptions []libpod.PodCreateOption
+ podYAML v1.Pod
+ registryCreds *types.DockerAuthConfig
+ writer io.Writer
+ )
+
+ content, err := ioutil.ReadFile(yamlFile)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := yaml.Unmarshal(content, &podYAML); err != nil {
+ return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
+ }
+
+ // check for name collision between pod and container
+ podName := podYAML.ObjectMeta.Name
+ for _, n := range podYAML.Spec.Containers {
+ if n.Name == podName {
+ fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName)
+ podName = fmt.Sprintf("%s_pod", podName)
+ }
+ }
+
+ podOptions = append(podOptions, libpod.WithInfraContainer())
+ podOptions = append(podOptions, libpod.WithPodName(podName))
+ // TODO for now we just use the default kernel namespaces; we need to add/subtract this from yaml
+
+ nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
+ if err != nil {
+ return nil, err
+ }
+ podOptions = append(podOptions, nsOptions...)
+ podPorts := getPodPorts(podYAML.Spec.Containers)
+ podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
+
+ // Create the Pod
+ pod, err = r.NewPod(ctx, podOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ podInfraID, err := pod.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ hasUserns := false
+ if podInfraID != "" {
+ podCtr, err := r.GetContainer(podInfraID)
+ if err != nil {
+ return nil, err
+ }
+ mappings, err := podCtr.IDMappings()
+ if err != nil {
+ return nil, err
+ }
+ hasUserns = len(mappings.UIDMap) > 0
+ }
+
+ namespaces := map[string]string{
+ // Disabled during code review per mheon
+ //"pid": fmt.Sprintf("container:%s", podInfraID),
+ "net": fmt.Sprintf("container:%s", podInfraID),
+ "ipc": fmt.Sprintf("container:%s", podInfraID),
+ "uts": fmt.Sprintf("container:%s", podInfraID),
+ }
+ if hasUserns {
+ namespaces["user"] = fmt.Sprintf("container:%s", podInfraID)
+ }
+ if !c.Quiet {
+ writer = os.Stderr
+ }
+
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: c.CertDir,
+ }
+ if c.Flag("tls-verify").Changed {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
+ }
+
+ // map from name to mount point
+ volumes := make(map[string]string)
+ for _, volume := range podYAML.Spec.Volumes {
+ hostPath := volume.VolumeSource.HostPath
+ if hostPath == nil {
+ return nil, errors.Errorf("HostPath is currently the only supported VolumeSource")
+ }
+ if hostPath.Type != nil {
+ switch *hostPath.Type {
+ case v1.HostPathDirectoryOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ }
+ // unconditionally label a newly created volume as private
+ if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathFileOrCreate:
+ if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
+ f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
+ if err != nil {
+ return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
+ }
+ if err := f.Close(); err != nil {
+ logrus.Warnf("Error in closing newly created HostPath file: %v", err)
+ }
+ }
+ // unconditionally label a newly created volume as private
+ if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
+ return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
+ }
+ case v1.HostPathDirectory:
+ case v1.HostPathFile:
+ case v1.HostPathUnset:
+ // do nothing here because we will verify the path exists in validateVolumeHostDir
+ break
+ default:
+ return nil, errors.Errorf("Directories are the only supported HostPath type")
+ }
+ }
+
+ if err := createconfig.ValidateVolumeHostDir(hostPath.Path); err != nil {
+ return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML")
+ }
+ volumes[volume.Name] = hostPath.Path
+ }
+
+ for _, container := range podYAML.Spec.Containers {
+ newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
+ if err != nil {
+ return nil, err
+ }
+ createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID())
+ if err != nil {
+ return nil, err
+ }
+ ctr, err := shared.CreateContainerFromCreateConfig(r.Runtime, createConfig, ctx, pod)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, ctr)
+ }
+
+ // start the containers
+ for _, ctr := range containers {
+ if err := ctr.Start(ctx, true); err != nil {
+ // Making this a hard failure here to avoid a mess: the other
+ // containers would be left in created status
+ return nil, err
+ }
+ }
+
+ // We've now successfully converted this YAML into a pod
+ // print our pod and containers, signifying we succeeded
+ fmt.Printf("Pod:\n%s\n", pod.ID())
+ if len(containers) == 1 {
+ fmt.Printf("Container:\n")
+ }
+ if len(containers) > 1 {
+ fmt.Printf("Containers:\n")
+ }
+ for _, ctr := range containers {
+ fmt.Println(ctr.ID())
+ }
+
+ if err := playcleanup(ctx, r, pod, nil); err != nil {
+ logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
+ }
+ return nil, nil
+}
+
+func playcleanup(ctx context.Context, runtime *LocalRuntime, pod *libpod.Pod, err error) error {
+ if err != nil && pod != nil {
+ return runtime.RemovePod(ctx, pod, true, true)
+ }
+ return nil
+}
+
+// getPodPorts converts a slice of kube container descriptions to an
+// array of ocicni portmapping descriptions usable in libpod
+func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
+ var infraPorts []ocicni.PortMapping
+ for _, container := range containers {
+ for _, p := range container.Ports {
+ portBinding := ocicni.PortMapping{
+ HostPort: p.HostPort,
+ ContainerPort: p.ContainerPort,
+ Protocol: strings.ToLower(string(p.Protocol)),
+ }
+ if p.HostIP != "" {
+ logrus.Debug("HostIP on port bindings is not supported")
+ }
+ infraPorts = append(infraPorts, portBinding)
+ }
+ }
+ return infraPorts
+}
+
+// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
+ var (
+ containerConfig createconfig.CreateConfig
+ )
+
+ // The default for MemorySwappiness is -1, not 0
+ containerConfig.Resources.MemorySwappiness = -1
+
+ containerConfig.Image = containerYAML.Image
+ containerConfig.ImageID = newImage.ID()
+ containerConfig.Name = containerYAML.Name
+ containerConfig.Tty = containerYAML.TTY
+ containerConfig.WorkDir = containerYAML.WorkingDir
+
+ containerConfig.Pod = podID
+
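+	// Inspect errors are deliberately ignored: imageData stays nil and the
+	// defaults below are used instead.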
+ imageData, _ := newImage.Inspect(ctx)
+
+ containerConfig.User = "0"
+ if imageData != nil {
+ containerConfig.User = imageData.Config.User
+ }
+
+	if containerYAML.SecurityContext != nil {
+ if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
+ containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+ }
+ if containerYAML.SecurityContext.Privileged != nil {
+ containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
+ }
+
+ if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
+ containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+ }
+ }
+
+ containerConfig.Command = []string{}
+ if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
+ }
+ if len(containerYAML.Command) != 0 {
+ containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
+ } else if imageData != nil && imageData.Config != nil {
+ containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
+ }
+ if imageData != nil && len(containerConfig.Command) == 0 {
+ return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
+ }
+
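+	// 15 == SIGTERM, the conventional default stop signal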
+ containerConfig.StopSignal = 15
+
+	// If the user does not pass in ID mappings, fall back to empty mapping options
+ if containerConfig.IDMappings == nil {
+ containerConfig.IDMappings = &storage.IDMappingOptions{}
+ }
+
+ containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
+ containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+ containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+ // disabled in code review per mheon
+ //containerConfig.PidMode = ns.PidMode(namespaces["pid"])
+ containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+ if len(containerConfig.WorkDir) == 0 {
+ containerConfig.WorkDir = "/"
+ }
+
+ // Set default environment variables and incorporate data from image, if necessary
+ envs := shared.EnvVariablesFromData(imageData)
+
+ // Environment Variables
+ for _, e := range containerYAML.Env {
+ envs[e.Name] = e.Value
+ }
+ containerConfig.Env = envs
+
+ for _, volume := range containerYAML.VolumeMounts {
+ hostPath, exists := volumes[volume.Name]
+ if !exists {
+ return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
+ }
+ if err := createconfig.ValidateVolumeCtrDir(volume.MountPath); err != nil {
+ return nil, errors.Wrapf(err, "error in parsing MountPath")
+ }
+ containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath))
+ }
+ return &containerConfig, nil
+}
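
The ENTRYPOINT/CMD merging in kubeContainerToCreateConfig keeps the image
entrypoint, lets a command from the kube YAML take precedence over the image
CMD, and errors only when no command can be derived at all. A minimal
standalone sketch of that rule (mergeCommand is an illustrative name, not
part of this patch):

	package main

	import "fmt"

	// mergeCommand mirrors the merge rule above: the image entrypoint is
	// always kept, and a kube YAML command wins over the image CMD.
	func mergeCommand(entrypoint, imageCmd, yamlCommand []string) []string {
		full := append([]string{}, entrypoint...)
		if len(yamlCommand) != 0 {
			return append(full, yamlCommand...)
		}
		return append(full, imageCmd...)
	}

	func main() {
		// Image: ENTRYPOINT ["nginx"], CMD ["-g", "daemon off;"], no YAML command.
		fmt.Println(mergeCommand([]string{"nginx"}, []string{"-g", "daemon off;"}, nil))
		// A YAML command replaces only the CMD portion.
		fmt.Println(mergeCommand([]string{"nginx"}, []string{"-g", "daemon off;"}, []string{"-v"}))
	}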
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index e2c97c36a..0c62ac923 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -257,25 +258,25 @@ func (p *Pod) AllContainers() ([]*Container, error) {
}
// Status ...
-func (p *Pod) Status() (map[string]libpod.ContainerStatus, error) {
- ctrs := make(map[string]libpod.ContainerStatus)
+func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
+ ctrs := make(map[string]define.ContainerStatus)
for _, i := range p.containers {
- var status libpod.ContainerStatus
+ var status define.ContainerStatus
switch i.State {
case "exited":
- status = libpod.ContainerStateExited
+ status = define.ContainerStateExited
case "stopped":
- status = libpod.ContainerStateStopped
+ status = define.ContainerStateStopped
case "running":
- status = libpod.ContainerStateRunning
+ status = define.ContainerStateRunning
case "paused":
- status = libpod.ContainerStatePaused
+ status = define.ContainerStatePaused
case "created":
- status = libpod.ContainerStateCreated
- case "configured":
- status = libpod.ContainerStateConfigured
+ status = define.ContainerStateCreated
+	case "configured":
+ status = define.ContainerStateConfigured
default:
- status = libpod.ContainerStateUnknown
+ status = define.ContainerStateUnknown
}
ctrs[i.ID] = status
}
@@ -509,7 +510,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*libpod.ContainerSta
newStats := varlinkapi.ContainerStatsToLibpodContainerStats(stats)
// If the container wasn't running, don't include it
// but also suppress the error
- if err != nil && errors.Cause(err) != libpod.ErrCtrStateInvalid {
+ if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
return nil, err
}
if err == nil {
@@ -563,3 +564,8 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
}
return ok, failures, nil
}
+
+// PlayKubeYAML creates pods and containers from a kube YAML file
+func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
+ return nil, define.ErrNotImplemented
+}
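
The string-to-status switch in Status() could equally be written as a lookup
table; a sketch of that alternative (stateFromString is an illustrative
helper, not part of this patch, and assumes the define import above):

	// stateFromString maps the state strings handled above to their
	// define.ContainerStatus values; anything unrecognized is Unknown.
	func stateFromString(s string) define.ContainerStatus {
		table := map[string]define.ContainerStatus{
			"exited":     define.ContainerStateExited,
			"stopped":    define.ContainerStateStopped,
			"running":    define.ContainerStateRunning,
			"paused":     define.ContainerStatePaused,
			"created":    define.ContainerStateCreated,
			"configured": define.ContainerStateConfigured,
		}
		if status, ok := table[s]; ok {
			return status
		}
		return define.ContainerStateUnknown
	}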
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 37ee1b737..ee6913cc0 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -5,6 +5,7 @@ package adapter
import (
"bufio"
"context"
+ "github.com/containers/libpod/libpod/define"
"io"
"io/ioutil"
"os"
@@ -57,12 +58,26 @@ type Volume struct {
// VolumeFilter is for filtering volumes on the client
type VolumeFilter func(*Volume) bool
+// GetRuntimeNoStore returns a LocalRuntime struct with an embedded runtime but
+// without configured storage.
+func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ runtime, err := libpodruntime.GetRuntimeNoStore(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+ return getRuntime(runtime)
+}
+
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime, err := libpodruntime.GetRuntime(ctx, c)
if err != nil {
return nil, err
}
+ return getRuntime(runtime)
+}
+
+func getRuntime(runtime *libpod.Runtime) (*LocalRuntime, error) {
return &LocalRuntime{
Runtime: runtime,
}, nil
@@ -70,16 +85,27 @@ func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime,
// GetImages returns a slice of images in containerimages
func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
+ return r.getImages(false)
+}
+
+// GetRWImages returns a slice of read/write images in containerimages
+func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) {
+ return r.getImages(true)
+}
+
+func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) {
var containerImages []*ContainerImage
images, err := r.Runtime.ImageRuntime().GetImages()
if err != nil {
return nil, err
}
for _, i := range images {
+ if rwOnly && i.IsReadOnly() {
+ continue
+ }
containerImages = append(containerImages, &ContainerImage{i})
}
return containerImages, nil
-
}
// NewImageFromLocal returns a containerimage representation of a image from local storage
@@ -306,15 +332,17 @@ func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfi
// IsImageNotFound checks if the error indicates that no image was found.
func IsImageNotFound(err error) bool {
- if errors.Cause(err) == image.ErrNoSuchImage {
- return true
- }
- return false
+ return errors.Cause(err) == image.ErrNoSuchImage
}
// HealthCheck is a wrapper to same named function in libpod
-func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
- return r.Runtime.HealthCheck(c.InputArgs[0])
+func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
+ output := "unhealthy"
+ status, err := r.Runtime.HealthCheck(c.InputArgs[0])
+ if status == libpod.HealthCheckSuccess {
+ output = "healthy"
+ }
+ return output, err
}
// Events is a wrapper to libpod to obtain libpod/podman events
@@ -339,9 +367,6 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
if eventsError != nil {
return eventsError
}
- if err != nil {
- return errors.Wrapf(err, "unable to tail the events log")
- }
w := bufio.NewWriter(os.Stdout)
for event := range eventChannel {
if len(c.Format) > 0 {
@@ -395,8 +420,8 @@ func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error)
}
// GetVersion is an alias to satisfy interface{}
-func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
- return libpod.GetVersion()
+func (r *LocalRuntime) GetVersion() (define.Version, error) {
+ return define.GetVersion()
}
// RemoteEndpoint resolve interface requirement
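
GetRWImages is the hook for callers that must skip images in read-only
additional stores; a brief usage sketch (assuming an already constructed
LocalRuntime named runtime and an fmt import):

	// Only read/write images can be altered or removed, so list just those.
	rwImages, err := runtime.GetRWImages()
	if err != nil {
		return err
	}
	for _, img := range rwImages {
		fmt.Println(img.ID())
	}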
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index e0c0898bd..9fae39df0 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -20,8 +20,10 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/remoteclientconfig"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/utils"
@@ -40,6 +42,7 @@ type RemoteRuntime struct {
Conn *varlink.Connection
Remote bool
cmd cliconfig.MainFlags
+ config io.Reader
}
// LocalRuntime describes a typical libpod runtime
@@ -47,12 +50,43 @@ type LocalRuntime struct {
*RemoteRuntime
}
+// GetRuntimeNoStore returns a LocalRuntime struct with the actual runtime embedded in it
+// The nostore option is ignored by the remote client
+func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ return GetRuntime(ctx, c)
+}
+
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
+ var (
+ customConfig bool
+ err error
+ f *os.File
+ )
runtime := RemoteRuntime{
Remote: true,
cmd: c.GlobalFlags,
}
+ configPath := remoteclientconfig.GetConfigFilePath()
+ if len(c.GlobalFlags.RemoteConfigFilePath) > 0 {
+ configPath = c.GlobalFlags.RemoteConfigFilePath
+ customConfig = true
+ }
+
+ f, err = os.Open(configPath)
+ if err != nil {
+ // If user does not explicitly provide a configuration file path and we cannot
+ // find a default, no error should occur.
+ if os.IsNotExist(err) && !customConfig {
+ logrus.Debugf("unable to load configuration file at %s", configPath)
+ runtime.config = nil
+ } else {
+ return nil, errors.Wrapf(err, "unable to load configuration file at %s", configPath)
+ }
+ } else {
+ // create the io reader for the remote client
+ runtime.config = bufio.NewReader(f)
+ }
conn, err := runtime.Connect()
if err != nil {
return nil, err
@@ -63,6 +97,14 @@ func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime,
}, nil
}
+// DeferredShutdown is a bogus wrapper for compat with the libpod
+// runtime and should only be run when a defer is being used
+func (r RemoteRuntime) DeferredShutdown(force bool) {
+ if err := r.Shutdown(force); err != nil {
+ logrus.Error("unable to shutdown runtime")
+ }
+}
+
// Shutdown is a bogus wrapper for compat with the libpod runtime
func (r RemoteRuntime) Shutdown(force bool) error {
return nil
@@ -87,6 +129,7 @@ type remoteImage struct {
isParent bool
Runtime *LocalRuntime
TopLayer string
+ ReadOnly bool
}
// Container ...
@@ -127,12 +170,24 @@ type remoteVolume struct {
// GetImages returns a slice of containerimages over a varlink connection
func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
+ return r.getImages(false)
+}
+
+// GetRWImages returns a slice of read/write containerimages over a varlink connection
+func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) {
+ return r.getImages(true)
+}
+
+func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) {
var newImages []*ContainerImage
images, err := iopodman.ListImages().Call(r.Conn)
if err != nil {
return nil, err
}
for _, i := range images {
+ if rwOnly && i.ReadOnly {
+ continue
+ }
name := i.Id
if len(i.RepoTags) > 1 {
name = i.RepoTags[0]
@@ -165,6 +220,7 @@ func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRu
isParent: i.IsParent,
Runtime: runtime,
TopLayer: i.TopLayer,
+ ReadOnly: i.ReadOnly,
}
return &ContainerImage{ri}, nil
}
@@ -260,6 +316,11 @@ func (ci *ContainerImage) Created() time.Time {
return ci.remoteImage.Created
}
+// IsReadOnly returns whether the image is ReadOnly
+func (ci *ContainerImage) IsReadOnly() bool {
+ return ci.remoteImage.ReadOnly
+}
+
// Size returns the size of the image
func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
usize := uint64(ci.remoteImage.Size)
@@ -500,12 +561,12 @@ func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) {
// GetAllVolumes retrieves all the volumes
func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) {
- return nil, libpod.ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
// RemoveVolume removes a volumes
func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error {
- return libpod.ErrNotImplemented
+ return define.ErrNotImplemented
}
// GetContainers retrieves all containers from the state
@@ -513,14 +574,14 @@ func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force
// the output. Multiple filters are handled by ANDing their output, so only
// containers matching all filters are returned
func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) {
- return nil, libpod.ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
// Otherwise, RemoveContainer will return an error if the container is running
func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error {
- return libpod.ErrNotImplemented
+ return define.ErrNotImplemented
}
// CreateVolume creates a volume over a varlink connection for the remote client
@@ -743,8 +804,8 @@ func IsImageNotFound(err error) bool {
}
// HealthCheck executes a container's healthcheck over a varlink connection
-func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
- return -1, libpod.ErrNotImplemented
+func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
+ return "", define.ErrNotImplemented
}
// Events monitors libpod/podman events over a varlink connection
@@ -879,22 +940,22 @@ func (r *LocalRuntime) GetContainersByContext(all bool, latest bool, namesOrIDs
}
// GetVersion returns version information from service
-func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
+func (r *LocalRuntime) GetVersion() (define.Version, error) {
version, goVersion, gitCommit, built, osArch, apiVersion, err := iopodman.GetVersion().Call(r.Conn)
if err != nil {
- return libpod.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
+ return define.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
}
var buildTime int64
if built != "" {
t, err := time.Parse(time.RFC3339, built)
if err != nil {
- return libpod.Version{}, nil
+			return define.Version{}, errors.Wrapf(err, "unable to parse build time %s", built)
}
buildTime = t.Unix()
}
- return libpod.Version{
+ return define.Version{
RemoteAPIVersion: apiVersion,
Version: version,
GoVersion: goVersion,
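
GetVersion parses the RFC3339 build timestamp reported by the service into
Unix seconds; a standalone sketch of that conversion (the timestamp value is
illustrative):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		built := "2019-06-24T10:00:00Z" // as reported by the varlink service
		var buildTime int64
		if built != "" {
			t, err := time.Parse(time.RFC3339, built)
			if err != nil {
				fmt.Println("unable to parse build time:", err)
				return
			}
			buildTime = t.Unix()
		}
		fmt.Println(buildTime)
	}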
diff --git a/pkg/adapter/sigproxy_linux.go b/pkg/adapter/sigproxy_linux.go
index af968cb89..ebfeab725 100644
--- a/pkg/adapter/sigproxy_linux.go
+++ b/pkg/adapter/sigproxy_linux.go
@@ -27,10 +27,10 @@ func ProxySignals(ctr *libpod.Container) {
if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err)
signal.StopCatch(sigBuffer)
- syscall.Kill(syscall.Getpid(), s.(syscall.Signal))
+ if err := syscall.Kill(syscall.Getpid(), s.(syscall.Signal)); err != nil {
+				logrus.Errorf("failed to kill pid %d: %v", syscall.Getpid(), err)
+ }
}
}
}()
-
- return
}
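
ProxySignals follows the usual catch-and-forward loop; a minimal sketch of
the pattern with the libpod specifics stripped out (the forwarding target
here is just a print, purely illustrative):

	package main

	import (
		"fmt"
		"os"
		"os/signal"
		"syscall"
	)

	func main() {
		sigBuffer := make(chan os.Signal, 128)
		signal.Notify(sigBuffer, syscall.SIGINT, syscall.SIGTERM)

		go func() {
			for s := range sigBuffer {
				// Forward to the container in the real code; on a forwarding
				// error the real loop stops catching and re-raises the signal
				// against the current process.
				fmt.Printf("forwarding signal %v\n", s)
			}
		}()

		select {} // block forever; the real caller returns immediately
	}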
diff --git a/pkg/adapter/terminal.go b/pkg/adapter/terminal.go
index 373c78322..51b747d23 100644
--- a/pkg/adapter/terminal.go
+++ b/pkg/adapter/terminal.go
@@ -7,6 +7,7 @@ import (
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/client-go/tools/remotecommand"
)
@@ -76,3 +77,25 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return bytes, err
}
+
+func handleTerminalAttach(ctx context.Context, resize chan remotecommand.TerminalSize) (context.CancelFunc, *term.State, error) {
+ logrus.Debugf("Handling terminal attach")
+
+ subCtx, cancel := context.WithCancel(ctx)
+
+ resizeTty(subCtx, resize)
+
+ oldTermState, err := term.SaveState(os.Stdin.Fd())
+ if err != nil {
+		// cancel up front so the caller has nothing to clean up if we error here
+ cancel()
+ return nil, nil, errors.Wrapf(err, "unable to save terminal state")
+ }
+
+ logrus.SetFormatter(&RawTtyFormatter{})
+ if _, err := term.SetRawTerminal(os.Stdin.Fd()); err != nil {
+ return cancel, nil, err
+ }
+
+ return cancel, oldTermState, nil
+}
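
handleTerminalAttach hands back both a cancel function and the saved terminal
state, and callers are expected to defer both, exactly as terminal_linux.go
does below; the pattern in isolation:

	cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
	if err != nil {
		return err
	}
	defer cancel()
	defer func() {
		if err := restoreTerminal(oldTermState); err != nil {
			logrus.Errorf("unable to restore terminal: %q", err)
		}
	}()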
diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go
index 3c4c3bd38..26cfd7b5e 100644
--- a/pkg/adapter/terminal_linux.go
+++ b/pkg/adapter/terminal_linux.go
@@ -6,38 +6,56 @@ import (
"os"
"github.com/containers/libpod/libpod"
- "github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/tools/remotecommand"
)
-// StartAttachCtr starts and (if required) attaches to a container
-func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error {
+// ExecAttachCtr execs and attaches to a container
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs int, detachKeys string) (int, error) {
resize := make(chan remotecommand.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
// Check if we are attached to a terminal. If we are, generate resize
// events, and set the terminal to raw mode
- if haveTerminal && ctr.Spec().Process.Terminal {
- logrus.Debugf("Handling terminal attach")
-
- subCtx, cancel := context.WithCancel(ctx)
+ if haveTerminal && tty {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ if err != nil {
+ return -1, err
+ }
defer cancel()
+ defer func() {
+ if err := restoreTerminal(oldTermState); err != nil {
+ logrus.Errorf("unable to restore terminal: %q", err)
+ }
+ }()
+ }
+ return ctr.Exec(tty, privileged, env, cmd, user, workDir, streams, preserveFDs, resize, detachKeys)
+}
- resizeTty(subCtx, resize)
+// StartAttachCtr starts and (if required) attaches to a container
+// Changing the signature of this function from *os.File to io.Writer would
+// break downstream callers, so the interfacer lint check is disabled here.
+func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint:interfacer
+ resize := make(chan remotecommand.TerminalSize)
- oldTermState, err := term.SaveState(os.Stdin.Fd())
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && ctr.Spec().Process.Terminal {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
if err != nil {
- return errors.Wrapf(err, "unable to save terminal state")
+ return err
}
-
- logrus.SetFormatter(&RawTtyFormatter{})
- term.SetRawTerminal(os.Stdin.Fd())
-
- defer restoreTerminal(oldTermState)
+ defer func() {
+ if err := restoreTerminal(oldTermState); err != nil {
+ logrus.Errorf("unable to restore terminal: %q", err)
+ }
+ }()
+ defer cancel()
}
streams := new(libpod.AttachStreams)