field | value | date |
---|---|---|
author | Brent Baude <bbaude@redhat.com> | 2020-04-16 12:25:26 -0500 |
committer | Brent Baude <bbaude@redhat.com> | 2020-04-16 15:53:58 -0500 |
commit | 241326a9a8c20ad7f2bcf651416b836e7778e090 (patch) | |
tree | 4001e8e47a022bb1b9bfbf2332c42e1aeb802f9e /pkg/adapter | |
parent | 88c6fd06cd54fb9a8826306dfdf1a77e400de5de (diff) | |
download | podman-241326a9a8c20ad7f2bcf651416b836e7778e090.tar.gz, podman-241326a9a8c20ad7f2bcf651416b836e7778e090.tar.bz2, podman-241326a9a8c20ad7f2bcf651416b836e7778e090.zip | |
Podman V2 birth
Remove podman v1 and replace with podman v2.
Signed-off-by: Brent Baude <bbaude@redhat.com>
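
The deleted package was organized around Go build tags: most operations were implemented twice, once against a local libpod runtime and once over the varlink remote client, with the `remoteclient` tag selecting the variant at compile time. The `AutoUpdate` pair at the top of the diff shows the pattern; it is reproduced here in readable form (the `LocalRuntime` receiver is the adapter's runtime wrapper defined elsewhere in the package):

```go
// pkg/adapter/autoupdate.go: compiled only into the local client.
// +build !remoteclient

package adapter

import "github.com/containers/libpod/pkg/autoupdate"

// AutoUpdate calls straight into libpod when podman runs locally.
func (r *LocalRuntime) AutoUpdate() ([]string, []error) {
	return autoupdate.AutoUpdate(r.Runtime)
}
```

```go
// pkg/adapter/autoupdate_remote.go: compiled only into podman-remote.
// +build remoteclient

package adapter

import "github.com/containers/libpod/libpod/define"

// AutoUpdate was never implemented over the varlink remote client.
func (r *LocalRuntime) AutoUpdate() ([]string, []error) {
	return nil, []error{define.ErrNotImplemented}
}
```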
Diffstat (limited to 'pkg/adapter')
25 files changed, 0 insertions, 6799 deletions
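
Most of what the diff removes is the remote client's transport plumbing. Its central mechanism, visible in client.go, client_unix.go, and client_windows.go below, was to reach a remote podman by spawning ssh and bridging varlink across it. The sketch below shows roughly the bridge command string that the deleted formatDefaultBridge helpers assembled; the host, user, port, and log level are made-up example values, and the identity-file and host-key options are omitted for brevity:

```go
package main

import "fmt"

func main() {
	// Hypothetical connection details; the real values came from the
	// remote-client config file or the remote-host and username CLI options.
	port, user, host, logLevel := 22, "core", "host.example.com", "error"

	// Approximate shape of the command built by the deleted
	// formatDefaultBridge (Windows variant; the Unix variant adds
	// extra shell escaping around the inner quotes).
	cmd := fmt.Sprintf(
		`ssh -p %d -T %s@%s -- varlink -A 'podman --log-level=%s varlink $VARLINK_ADDRESS' bridge`,
		port, user, host, logLevel)
	fmt.Println(cmd)
}
```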
diff --git a/pkg/adapter/autoupdate.go b/pkg/adapter/autoupdate.go deleted file mode 100644 index 01f7a29c5..000000000 --- a/pkg/adapter/autoupdate.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "github.com/containers/libpod/pkg/autoupdate" -) - -func (r *LocalRuntime) AutoUpdate() ([]string, []error) { - return autoupdate.AutoUpdate(r.Runtime) -} diff --git a/pkg/adapter/autoupdate_remote.go b/pkg/adapter/autoupdate_remote.go deleted file mode 100644 index a2a82d0d4..000000000 --- a/pkg/adapter/autoupdate_remote.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" -) - -func (r *LocalRuntime) AutoUpdate() ([]string, []error) { - return nil, []error{define.ErrNotImplemented} -} diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go deleted file mode 100644 index a1b2bd507..000000000 --- a/pkg/adapter/client.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "fmt" - "os" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" -) - -var remoteEndpoint *Endpoint // nolint: deadcode,unused - -func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) { - remoteConfigConnections, err := remoteclientconfig.ReadRemoteConfig(r.config) - if err != nil && errors.Cause(err) != remoteclientconfig.ErrNoConfigationFile { - return nil, err - } - // If the user defines an env variable for podman_varlink_bridge - // we use that as passed. - if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" { - logrus.Debug("creating a varlink bridge based on env variable") - remoteEndpoint, err = newBridgeConnection(bridge, nil, r.cmd.LogLevel) - // if an environment variable for podman_varlink_address is defined, - // we used that as passed - } else if address := os.Getenv("PODMAN_VARLINK_ADDRESS"); address != "" { // nolint:gocritic - logrus.Debugf("creating a varlink address based on env variable: %s", address) - remoteEndpoint, err = newSocketConnection(address) - // if the user provides a remote host, we use it to configure a bridge connection - } else if len(r.cmd.RemoteHost) > 0 { - logrus.Debug("creating a varlink bridge based on user input") - if len(r.cmd.RemoteUserName) < 1 { - return nil, errors.New("you must provide a username when providing a remote host name") - } - rc := remoteclientconfig.RemoteConnection{r.cmd.RemoteHost, r.cmd.RemoteUserName, false, r.cmd.Port, r.cmd.IdentityFile, r.cmd.IgnoreHosts} // nolint: govet - remoteEndpoint, err = newBridgeConnection("", &rc, r.cmd.LogLevel) - // if the user has a config file with connections in it - } else if len(remoteConfigConnections.Connections) > 0 { - logrus.Debug("creating a varlink bridge based configuration file") - var rc *remoteclientconfig.RemoteConnection - if len(r.cmd.ConnectionName) > 0 { - rc, err = remoteConfigConnections.GetRemoteConnection(r.cmd.ConnectionName) - } else { - rc, err = remoteConfigConnections.GetDefault() - } - if err != nil { - return nil, err - } - if len(rc.Username) < 1 { - logrus.Debugf("Connection has no username, using current user %q", r.cmd.RemoteUserName) - rc.Username = r.cmd.RemoteUserName - } - remoteEndpoint, err = newBridgeConnection("", rc, r.cmd.LogLevel) - // last resort is to make a socket connection with the default varlink address for root user - } else { - logrus.Debug("creating a varlink address based 
default root address") - remoteEndpoint, err = newSocketConnection(DefaultVarlinkAddress) - } - return // nolint: nakedret -} - -// Connect provides a varlink connection -func (r RemoteRuntime) Connect() (*varlink.Connection, error) { - ep, err := r.RemoteEndpoint() - if err != nil { - return nil, err - } - switch ep.Type { - case DirectConnection: - return varlink.NewConnection(ep.Connection) - case BridgeConnection: - return varlink.NewBridge(ep.Connection) - } - return nil, errors.New(fmt.Sprintf("Unable to determine type of varlink connection: %s", ep.Connection)) -} - -// RefreshConnection is used to replace the current r.Conn after things like -// using an upgraded varlink connection -func (r RemoteRuntime) RefreshConnection() error { - newConn, err := r.Connect() - if err != nil { - return err - } - r.Conn = newConn - return nil -} - -// newSocketConnection returns an endpoint for a uds based connection -func newSocketConnection(address string) (*Endpoint, error) { - endpoint := Endpoint{ - Type: DirectConnection, - Connection: address, - } - return &endpoint, nil -} - -// newBridgeConnection creates a bridge type endpoint with username, destination, and log-level -func newBridgeConnection(formattedBridge string, remoteConn *remoteclientconfig.RemoteConnection, logLevel string) (*Endpoint, error) { - endpoint := Endpoint{ - Type: BridgeConnection, - } - - if len(formattedBridge) < 1 && remoteConn == nil { - return nil, errors.New("bridge connections must either be created by string or remoteconnection") - } - if len(formattedBridge) > 0 { - endpoint.Connection = formattedBridge - return &endpoint, nil - } - endpoint.Connection = formatDefaultBridge(remoteConn, logLevel) - return &endpoint, nil -} diff --git a/pkg/adapter/client_config.go b/pkg/adapter/client_config.go deleted file mode 100644 index 8187b03b1..000000000 --- a/pkg/adapter/client_config.go +++ /dev/null @@ -1,39 +0,0 @@ -package adapter - -// DefaultAPIAddress is the default address of the REST socket -const DefaultAPIAddress = "unix:/run/podman/podman.sock" - -// DefaultVarlinkAddress is the default address of the varlink socket -const DefaultVarlinkAddress = "unix:/run/podman/io.podman" - -// EndpointType declares the type of server connection -type EndpointType int - -// Enum of connection types -const ( - Unknown = iota - 1 // Unknown connection type - BridgeConnection // BridgeConnection proxy connection via ssh - DirectConnection // DirectConnection socket connection to server -) - -// String prints ASCII string for EndpointType -func (e EndpointType) String() string { - // declare an array of strings - // ... 
operator counts how many - // items in the array (7) - names := [...]string{ - "BridgeConnection", - "DirectConnection", - } - - if e < BridgeConnection || e > DirectConnection { - return "Unknown" - } - return names[e] -} - -// Endpoint type and connection string to use -type Endpoint struct { - Type EndpointType - Connection string -} diff --git a/pkg/adapter/client_unix.go b/pkg/adapter/client_unix.go deleted file mode 100644 index 7af8b24c6..000000000 --- a/pkg/adapter/client_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build linux darwin -// +build remoteclient - -package adapter - -import ( - "fmt" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" -) - -func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string { - port := remoteConn.Port - if port == 0 { - port = 22 - } - options := "" - if remoteConn.IdentityFile != "" { - options += " -i " + remoteConn.IdentityFile - } - if remoteConn.IgnoreHosts { - options += " -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - } - return fmt.Sprintf( - `ssh -p %d -T%s %s@%s -- varlink -A \'podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`, - port, options, remoteConn.Username, remoteConn.Destination, logLevel) -} diff --git a/pkg/adapter/client_windows.go b/pkg/adapter/client_windows.go deleted file mode 100644 index 32302a600..000000000 --- a/pkg/adapter/client_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "fmt" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" -) - -func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string { - port := remoteConn.Port - if port == 0 { - port = 22 - } - options := "" - if remoteConn.IdentityFile != "" { - options += " -i " + remoteConn.IdentityFile - } - if remoteConn.IgnoreHosts { - options += " -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - } - return fmt.Sprintf( - `ssh -p %d -T%s %s@%s -- varlink -A 'podman --log-level=%s varlink $VARLINK_ADDRESS' bridge`, - port, options, remoteConn.Username, remoteConn.Destination, logLevel) -} diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go deleted file mode 100644 index ecadbd2f9..000000000 --- a/pkg/adapter/containers.go +++ /dev/null @@ -1,1394 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "bufio" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/containers/buildah" - cfg "github.com/containers/common/pkg/config" - "github.com/containers/image/v5/manifest" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/libpod/logs" - "github.com/containers/libpod/pkg/adapter/shortcuts" - "github.com/containers/libpod/pkg/checkpoint" - envLib "github.com/containers/libpod/pkg/env" - "github.com/containers/libpod/pkg/systemd/generate" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GetLatestContainer gets the latest Container and wraps it in an adapter Container -func (r *LocalRuntime) GetLatestContainer() (*Container, error) { - Container := Container{} - c, err := r.Runtime.GetLatestContainer() - Container.Container = c - return &Container, err -} - -// 
GetAllContainers gets all Containers and wraps each one in an adapter Container -func (r *LocalRuntime) GetAllContainers() ([]*Container, error) { - var containers []*Container - allContainers, err := r.Runtime.GetAllContainers() - if err != nil { - return nil, err - } - - for _, c := range allContainers { - containers = append(containers, &Container{c}) - } - return containers, nil -} - -// LookupContainer gets a Container by name or id and wraps it in an adapter Container -func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) { - ctr, err := r.Runtime.LookupContainer(idOrName) - if err != nil { - return nil, err - } - return &Container{ctr}, nil -} - -// StopContainers stops container(s) based on CLI inputs. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) { - var timeout *uint - if cli.Flags().Changed("timeout") || cli.Flags().Changed("time") { - t := cli.Timeout - timeout = &t - } - - maxWorkers := shared.DefaultPoolSize("stop") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum stop workers to %d", maxWorkers) - - names := cli.InputArgs - for _, cidFile := range cli.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - names = append(names, id) - } - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { - return nil, nil, err - } - - pool := shared.NewPool("stop", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - if timeout == nil { - t := c.StopTimeout() - timeout = &t - logrus.Debugf("Set timeout to container %s default (%d)", c.ID(), *timeout) - } - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - err := c.StopWithTimeout(*timeout) - if err != nil { - if errors.Cause(err) == define.ErrCtrStopped { - logrus.Debugf("Container %s is already stopped", c.ID()) - return nil - } else if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid { - logrus.Debugf("Container %s is not running, could not stop", c.ID()) - return nil - } - logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// KillContainers sends signal to container(s) based on CLI inputs. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) { - maxWorkers := shared.DefaultPoolSize("kill") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum kill workers to %d", maxWorkers) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return nil, nil, err - } - - pool := shared.NewPool("kill", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - return c.Kill(uint(signal)) - }, - }) - } - return pool.Run() -} - -// InitContainers initializes container(s) based on CLI inputs. 
-// Returns list of successful id(s), map of failed id(s) to errors, or a general -// error not from the container. -func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitValues) ([]string, map[string]error, error) { - maxWorkers := shared.DefaultPoolSize("init") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum init workers to %d", maxWorkers) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return nil, nil, err - } - - pool := shared.NewPool("init", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Init(ctx) - if err != nil { - // If we're initializing all containers, ignore invalid state errors - if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid { - return nil - } - return err - } - return nil - }, - }) - } - return pool.Run() -} - -// RemoveContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - maxWorkers := shared.DefaultPoolSize("rm") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.Storage { - for _, ctr := range cli.InputArgs { - if err := r.RemoveStorageContainer(ctr, cli.Force); err != nil { - failures[ctr] = err - } - ok = append(ok, ctr) - } - return ok, failures, nil - } - - names := cli.InputArgs - for _, cidFile := range cli.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - names = append(names, id) - } - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { - // Failed to get containers. If force is specified, get the containers ID - // and evict them - if !cli.Force { - return ok, failures, err - } - - for _, ctr := range cli.InputArgs { - logrus.Debugf("Evicting container %q", ctr) - id, err := r.EvictContainer(ctx, ctr, cli.Volumes) - if err != nil { - if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr { - logrus.Debugf("Ignoring error (--allow-missing): %v", err) - continue - } - failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id) - continue - } - ok = append(ok, id) - } - return ok, failures, nil - } - - pool := shared.NewPool("rm", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes) - if err != nil { - if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr { - logrus.Debugf("Ignoring error (--allow-missing): %v", err) - return nil - } - logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// UmountRootFilesystems removes container(s) based on CLI inputs. 
-func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, ctr := range ctrs { - state, err := ctr.State() - if err != nil { - logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error()) - continue - } - if state == define.ContainerStateRunning { - logrus.Debugf("Error umounting container %s, is running", ctr.ID()) - continue - } - - if err := ctr.Unmount(cli.Force); err != nil { - if cli.All && errors.Cause(err) == storage.ErrLayerNotMounted { - logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID()) - continue - } - failures[ctr.ID()] = errors.Wrapf(err, "error unmounting container %s", ctr.ID()) - } else { - ok = append(ok, ctr.ID()) - } - } - return ok, failures, nil -} - -// WaitOnContainers waits for all given container(s) to stop -func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(false, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, c := range ctrs { - if returnCode, err := c.WaitWithInterval(interval); err == nil { - ok = append(ok, strconv.Itoa(int(returnCode))) - } else { - failures[c.ID()] = err - } - } - return ok, failures, err -} - -// Log logs one or more containers -func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error { - - var wg sync.WaitGroup - options.WaitGroup = &wg - if len(c.InputArgs) > 1 { - options.Multi = true - } - tailLen := int(c.Tail) - if tailLen < 0 { - tailLen = 0 - } - numContainers := len(c.InputArgs) - if numContainers == 0 { - numContainers = 1 - } - logChannel := make(chan *logs.LogLine, tailLen*numContainers+1) - containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - return err - } - if err := r.Runtime.Log(containers, options, logChannel); err != nil { - return err - } - go func() { - wg.Wait() - close(logChannel) - }() - for line := range logChannel { - fmt.Println(line.String(options)) - } - return nil -} - -// CreateContainer creates a libpod container -func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, false) - ctr, _, err := shared.CreateContainer(ctx, &results, r.Runtime) - if err != nil { - return "", err - } - return ctr.ID(), nil -} - -// Select the detach keys to use from user input flag, config file, or default value -func (r *LocalRuntime) selectDetachKeys(flagValue string) (string, error) { - if flagValue != "" { - return flagValue, nil - } - - config, err := r.GetConfig() - if err != nil { - return "", errors.Wrapf(err, "unable to retrieve runtime config") - } - if config.Engine.DetachKeys != "" { - return config.Engine.DetachKeys, nil - } - - return cfg.DefaultDetachKeys, nil -} - -// Run a libpod container -func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, false) - - ctr, createConfig, err := shared.CreateContainer(ctx, &results, 
r.Runtime) - if err != nil { - return exitCode, err - } - - if logrus.GetLevel() == logrus.DebugLevel { - cgroupPath, err := ctr.CGroupPath() - if err == nil { - logrus.Debugf("container %q has CgroupParent %q", ctr.ID(), cgroupPath) - } - } - - // Handle detached start - if createConfig.Detach { - // if the container was created as part of a pod, also start its dependencies, if any. - if err := ctr.Start(ctx, c.IsSet("pod")); err != nil { - // This means the command did not exist - return define.ExitCode(err), err - } - - fmt.Printf("%s\n", ctr.ID()) - exitCode = 0 - return exitCode, nil - } - - outputStream := os.Stdout - errorStream := os.Stderr - inputStream := os.Stdin - - // If -i is not set, clear stdin - if !c.Bool("interactive") { - inputStream = nil - } - - // If attach is set, clear stdin/stdout/stderr and only attach requested - if c.IsSet("attach") || c.IsSet("a") { - outputStream = nil - errorStream = nil - if !c.Bool("interactive") { - inputStream = nil - } - - attachTo := c.StringSlice("attach") - for _, stream := range attachTo { - switch strings.ToLower(stream) { - case "stdout": - outputStream = os.Stdout - case "stderr": - errorStream = os.Stderr - case "stdin": - inputStream = os.Stdin - default: - return exitCode, errors.Wrapf(define.ErrInvalidArg, "invalid stream %q for --attach - must be one of stdin, stdout, or stderr", stream) - } - } - } - - keys := c.String("detach-keys") - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return exitCode, err - } - } - - // if the container was created as part of a pod, also start its dependencies, if any. - if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, keys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { - // We've manually detached from the container - // Do not perform cleanup, or wait for container exit code - // Just exit immediately - if errors.Cause(err) == define.ErrDetach { - return 0, nil - } - if c.IsSet("rm") { - if deleteError := r.Runtime.RemoveContainer(ctx, ctr, true, false); deleteError != nil { - logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID()) - } - } - if errors.Cause(err) == define.ErrWillDeadlock { - logrus.Debugf("Deadlock error: %v", err) - return define.ExitCode(err), errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID()) - } - return define.ExitCode(err), err - } - - if ecode, err := ctr.Wait(); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr { - // Check events - event, err := r.Runtime.GetLastContainerEvent(ctr.ID(), events.Exited) - if err != nil { - logrus.Errorf("Cannot get exit code: %v", err) - exitCode = define.ExecErrorCodeNotFound - } else { - exitCode = event.ContainerExitCode - } - } - } else { - exitCode = int(ecode) - } - - if c.IsSet("rm") { - if err := r.Runtime.RemoveContainer(ctx, ctr, false, true); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr || - errors.Cause(err) == define.ErrCtrRemoved { - logrus.Warnf("Container %s does not exist: %v", ctr.ID(), err) - } else { - logrus.Errorf("Error removing container %s: %v", ctr.ID(), err) - } - } - } - - return exitCode, nil -} - -// Ps ... 
-func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) { - maxWorkers := shared.Parallelize("ps") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum workers to %d", maxWorkers) - return shared.GetPsContainerOutput(r.Runtime, opts, c.Filter, maxWorkers) -} - -// Attach ... -func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error { - var ( - ctr *libpod.Container - err error - ) - - if c.Latest { - ctr, err = r.Runtime.GetLatestContainer() - } else { - ctr, err = r.Runtime.LookupContainer(c.InputArgs[0]) - } - - if err != nil { - return errors.Wrapf(err, "unable to exec into %s", c.InputArgs[0]) - } - - conState, err := ctr.State() - if err != nil { - return errors.Wrapf(err, "unable to determine state of %s", ctr.ID()) - } - if conState != define.ContainerStateRunning { - return errors.Errorf("you can only attach to running containers") - } - - inputStream := os.Stdin - if c.NoStdin { - inputStream = nil - } - - keys := c.DetachKeys - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return err - } - } - - // If the container is in a pod, also set to recursively start dependencies - if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, keys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach { - return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) - } - return nil -} - -// Checkpoint one or more containers -func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error { - var ( - containers []*libpod.Container - err, lastError error - ) - - options := libpod.ContainerCheckpointOptions{ - Keep: c.Keep, - KeepRunning: c.LeaveRunning, - TCPEstablished: c.TcpEstablished, - TargetFile: c.Export, - IgnoreRootfs: c.IgnoreRootfs, - } - if c.Export == "" && c.IgnoreRootfs { - return errors.Errorf("--ignore-rootfs can only be used with --export") - } - if c.All { - containers, err = r.Runtime.GetRunningContainers() - } else { - containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - } - if err != nil { - return err - } - - for _, ctr := range containers { - if err = ctr.Checkpoint(context.TODO(), options); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to checkpoint container %v", ctr.ID()) - } else { - fmt.Println(ctr.ID()) - } - } - return lastError -} - -// Restore one or more containers -func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error { - var ( - containers []*libpod.Container - err, lastError error - filterFuncs []libpod.ContainerFilter - ) - - options := libpod.ContainerCheckpointOptions{ - Keep: c.Keep, - TCPEstablished: c.TcpEstablished, - TargetFile: c.Import, - Name: c.Name, - IgnoreRootfs: c.IgnoreRootfs, - IgnoreStaticIP: c.IgnoreStaticIP, - IgnoreStaticMAC: c.IgnoreStaticMAC, - } - - filterFuncs = append(filterFuncs, func(c *libpod.Container) bool { - state, _ := c.State() - return state == define.ContainerStateExited - }) - - switch { - case c.Import != "": - containers, err = checkpoint.CRImportCheckpoint(ctx, r.Runtime, c.Import, c.Name) - case c.All: - containers, err = r.GetContainers(filterFuncs...) 
- default: - containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - } - if err != nil { - return err - } - - for _, ctr := range containers { - if err = ctr.Restore(context.TODO(), options); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to restore container %v", ctr.ID()) - } else { - fmt.Println(ctr.ID()) - } - } - return lastError -} - -// Start will start a container -func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) { - var ( - exitCode = define.ExecErrorCodeGeneric - lastError error - ) - - args := c.InputArgs - if c.Latest { - lastCtr, err := r.GetLatestContainer() - if err != nil { - return 0, errors.Wrapf(err, "unable to get latest container") - } - args = append(args, lastCtr.ID()) - } - - for _, container := range args { - ctr, err := r.LookupContainer(container) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "unable to find container %s", container) - continue - } - - ctrState, err := ctr.State() - if err != nil { - return exitCode, errors.Wrapf(err, "unable to get container state") - } - - ctrRunning := ctrState == define.ContainerStateRunning - - if c.Attach { - inputStream := os.Stdin - if !c.Interactive { - if !ctr.Stdin() { - inputStream = nil - } - } - - keys := c.DetachKeys - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return exitCode, err - } - } - - // attach to the container and also start it not already running - // If the container is in a pod, also set to recursively start dependencies - err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, keys, sigProxy, !ctrRunning, ctr.PodID() != "") - if errors.Cause(err) == define.ErrDetach { - // User manually detached - // Exit cleanly immediately - exitCode = 0 - return exitCode, nil - } - - if errors.Cause(err) == define.ErrWillDeadlock { - logrus.Debugf("Deadlock error: %v", err) - return define.ExitCode(err), errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID()) - } - - if ctrRunning { - return 0, err - } - - if err != nil { - return exitCode, errors.Wrapf(err, "unable to start container %s", ctr.ID()) - } - - if ecode, err := ctr.Wait(); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr { - // Check events - event, err := r.Runtime.GetLastContainerEvent(ctr.ID(), events.Exited) - if err != nil { - logrus.Errorf("Cannot get exit code: %v", err) - exitCode = define.ExecErrorCodeNotFound - } else { - exitCode = event.ContainerExitCode - } - } - } else { - exitCode = int(ecode) - } - - return exitCode, nil - } - // Start the container if it's not running already. - if !ctrRunning { - // Handle non-attach start - // If the container is in a pod, also set to recursively start dependencies - if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - if errors.Cause(err) == define.ErrWillDeadlock { - lastError = errors.Wrapf(err, "please run 'podman system renumber' to resolve deadlocks") - continue - } - lastError = errors.Wrapf(err, "unable to start container %q", container) - continue - } - } - // Check if the container is referenced by ID or by name and print - // it accordingly. 
- if strings.HasPrefix(ctr.ID(), container) { - fmt.Println(ctr.ID()) - } else { - fmt.Println(container) - } - } - return exitCode, lastError -} - -// PauseContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*libpod.Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("pause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - ctrs, err = r.GetRunningContainers() - } else { - ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime) - } - if err != nil { - return ok, failures, err - } - - pool := shared.NewPool("pause", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Pause() - if err != nil { - logrus.Debugf("Failed to pause container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// UnpauseContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*libpod.Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("pause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - var filterFuncs []libpod.ContainerFilter - filterFuncs = append(filterFuncs, func(c *libpod.Container) bool { - state, _ := c.State() - return state == define.ContainerStatePaused - }) - ctrs, err = r.GetContainers(filterFuncs...) - } else { - ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime) - } - if err != nil { - return ok, failures, err - } - - pool := shared.NewPool("pause", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Unpause() - if err != nil { - logrus.Debugf("Failed to unpause container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// Restart containers without or without a timeout -func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { - var ( - containers []*libpod.Container - restartContainers []*libpod.Container - err error - ) - useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed - inputTimeout := c.Timeout - - // Handle --latest - switch { - case c.Latest: - lastCtr, err := r.Runtime.GetLatestContainer() - if err != nil { - return nil, nil, errors.Wrapf(err, "unable to get latest container") - } - restartContainers = append(restartContainers, lastCtr) - case c.Running: - containers, err = r.GetRunningContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) - case c.All: - containers, err = r.Runtime.GetAllContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) 
- default: - for _, id := range c.InputArgs { - ctr, err := r.Runtime.LookupContainer(id) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, ctr) - } - } - - maxWorkers := shared.DefaultPoolSize("restart") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalFlags.MaxWorks - } - - logrus.Debugf("Setting maximum workers to %d", maxWorkers) - - // We now have a slice of all the containers to be restarted. Iterate them to - // create restart Funcs with a timeout as needed - pool := shared.NewPool("restart", maxWorkers, len(restartContainers)) - for _, c := range restartContainers { - ctr := c - timeout := ctr.StopTimeout() - if useTimeout { - timeout = inputTimeout - } - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.RestartWithTimeout(ctx, timeout) - if err != nil { - logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// Top display the running processes of a container -func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) { - var ( - descriptors []string - container *libpod.Container - err error - ) - if cli.Latest { - descriptors = cli.InputArgs - container, err = r.Runtime.GetLatestContainer() - } else { - descriptors = cli.InputArgs[1:] - container, err = r.Runtime.LookupContainer(cli.InputArgs[0]) - } - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup requested container") - } - - return container.Top(descriptors) -} - -// ExecContainer executes a command in the container -func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) { - var ( - ctr *Container - err error - cmd []string - ) - // default invalid command exit code - ec := define.ExecErrorCodeGeneric - - if cli.Latest { - if ctr, err = r.GetLatestContainer(); err != nil { - return ec, err - } - cmd = cli.InputArgs[0:] - } else { - if ctr, err = r.LookupContainer(cli.InputArgs[0]); err != nil { - return ec, err - } - cmd = cli.InputArgs[1:] - } - - if cli.PreserveFDs > 0 { - entries, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return ec, errors.Wrapf(err, "unable to read /proc/self/fd") - } - - m := make(map[int]bool) - for _, e := range entries { - i, err := strconv.Atoi(e.Name()) - if err != nil { - return ec, errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name()) - } - m[i] = true - } - - for i := 3; i < 3+cli.PreserveFDs; i++ { - if _, found := m[i]; !found { - return ec, errors.New("invalid --preserve-fds=N specified. 
Not enough FDs available") - } - } - } - - // Validate given environment variables - env := map[string]string{} - if len(cli.EnvFile) > 0 { - for _, f := range cli.EnvFile { - fileEnv, err := envLib.ParseFile(f) - if err != nil { - return ec, err - } - env = envLib.Join(env, fileEnv) - } - } - cliEnv, err := envLib.ParseSlice(cli.Env) - if err != nil { - return ec, errors.Wrap(err, "error parsing environment variables") - } - env = envLib.Join(env, cliEnv) - - streams := new(define.AttachStreams) - streams.OutputStream = os.Stdout - streams.ErrorStream = os.Stderr - if cli.Interactive { - streams.InputStream = bufio.NewReader(os.Stdin) - streams.AttachInput = true - } - streams.AttachOutput = true - streams.AttachError = true - - keys := cli.DetachKeys - if !cli.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return ec, err - } - } - - ec, err = ExecAttachCtr(ctx, ctr.Container, cli.Tty, cli.Privileged, env, cmd, cli.User, cli.Workdir, streams, uint(cli.PreserveFDs), keys) - return define.TranslateExecErrorToExitCode(ec, err), err -} - -// Prune removes stopped containers -func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, filters []string) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - err error - filterFunc []libpod.ContainerFilter - ) - - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - for _, filter := range filters { - filterSplit := strings.SplitN(filter, "=", 2) - if len(filterSplit) < 2 { - return ok, failures, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", filter) - } - - f, err := shared.GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r.Runtime) - if err != nil { - return ok, failures, err - } - filterFunc = append(filterFunc, f) - } - - containerStateFilter := func(c *libpod.Container) bool { - state, err := c.State() - if err != nil { - logrus.Error(err) - return false - } - if c.PodID() != "" { - return false - } - if state == define.ContainerStateStopped || state == define.ContainerStateExited || - state == define.ContainerStateCreated || state == define.ContainerStateConfigured { - return true - } - return false - } - filterFunc = append(filterFunc, containerStateFilter) - - delContainers, err := r.Runtime.GetContainers(filterFunc...) 
- if err != nil { - return ok, failures, err - } - if len(delContainers) < 1 { - return ok, failures, err - } - pool := shared.NewPool("prune", maxWorkers, len(delContainers)) - for _, c := range delContainers { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := r.Runtime.RemoveContainer(ctx, ctr, false, false) - if err != nil { - logrus.Debugf("Failed to prune container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// CleanupContainers any leftovers bits of stopped containers -func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, ctr := range ctrs { - if cli.Remove { - err = removeContainer(ctx, ctr, r) - } else { - err = cleanupContainer(ctx, ctr, r) - } - - if err == nil { - ok = append(ok, ctr.ID()) - } else { - failures[ctr.ID()] = err - } - - if cli.RemoveImage { - _, imageName := ctr.Image() - if err := removeContainerImage(ctx, ctr, r); err != nil { - failures[imageName] = err - } else { - ok = append(ok, imageName) - } - } - } - return ok, failures, nil -} - -// Only used when cleaning up containers -func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil { - return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID()) - } - return nil -} - -func cleanupContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - if err := ctr.Cleanup(ctx); err != nil { - return errors.Wrapf(err, "failed to cleanup container %v", ctr.ID()) - } - return nil -} - -func removeContainerImage(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - _, imageName := ctr.Image() - ctrImage, err := runtime.NewImageFromLocal(imageName) - if err != nil { - return err - } - _, err = runtime.RemoveImage(ctx, ctrImage, false) - return err -} - -// Port displays port information about existing containers -func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { - var ( - portContainers []*Container - containers []*libpod.Container - err error - ) - - if !c.All { - names := []string{} - if len(c.InputArgs) >= 1 { - names = []string{c.InputArgs[0]} - } - containers, err = shortcuts.GetContainersByContext(false, c.Latest, names, r.Runtime) - } else { - containers, err = r.Runtime.GetRunningContainers() - } - if err != nil { - return nil, err - } - - //Convert libpod containers to adapter Containers - for _, con := range containers { - if state, _ := con.State(); state != define.ContainerStateRunning { - continue - } - portContainers = append(portContainers, &Container{con}) - } - return portContainers, nil -} - -// generateServiceName generates the container name and the service name for systemd service. 
-func generateServiceName(c *cliconfig.GenerateSystemdValues, ctr *libpod.Container, pod *libpod.Pod) (string, string) { - var kind, name, ctrName string - if pod == nil { - kind = "container" - name = ctr.ID() - if c.Name { - name = ctr.Name() - } - ctrName = name - } else { - kind = "pod" - name = pod.ID() - ctrName = ctr.ID() - if c.Name { - name = pod.Name() - ctrName = ctr.Name() - } - } - return ctrName, fmt.Sprintf("%s-%s", kind, name) -} - -// generateSystemdgenContainerInfo is a helper to generate a -// systemdgen.ContainerInfo for `GenerateSystemd`. -func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSystemdValues, nameOrID string, pod *libpod.Pod) (*generate.ContainerInfo, bool, error) { - ctr, err := r.Runtime.LookupContainer(nameOrID) - if err != nil { - return nil, false, err - } - - timeout := ctr.StopTimeout() - if c.Flags().Changed("timeout") || c.Flags().Changed("time") { - timeout = c.StopTimeout - } - - config := ctr.Config() - conmonPidFile := config.ConmonPidFile - if conmonPidFile == "" { - return nil, true, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag") - } - - name, serviceName := generateServiceName(c, ctr, pod) - info := &generate.ContainerInfo{ - ServiceName: serviceName, - ContainerName: name, - RestartPolicy: c.RestartPolicy, - PIDFile: conmonPidFile, - StopTimeout: timeout, - GenerateTimestamp: true, - CreateCommand: config.CreateCommand, - } - - return info, true, nil -} - -// GenerateSystemd creates a unit file for a container or pod. -func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - opts := generate.Options{ - Files: c.Files, - New: c.New, - } - - // First assume it's a container. - if info, found, err := r.generateSystemdgenContainerInfo(c, c.InputArgs[0], nil); found && err != nil { - return "", err - } else if found && err == nil { - return generate.CreateContainerSystemdUnit(info, opts) - } - - // --new does not support pods. - if c.New { - return "", errors.Errorf("error generating systemd unit files: cannot generate generic files for a pod") - } - - // We're either having a pod or garbage. - pod, err := r.Runtime.LookupPod(c.InputArgs[0]) - if err != nil { - return "", err - } - - // Error out if the pod has no infra container, which we require to be the - // main service. - if !pod.HasInfraContainer() { - return "", fmt.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name()) - } - - // Generate a systemdgen.ContainerInfo for the infra container. This - // ContainerInfo acts as the main service of the pod. - infraID, err := pod.InfraContainerID() - if err != nil { - return "", nil - } - podInfo, _, err := r.generateSystemdgenContainerInfo(c, infraID, pod) - if err != nil { - return "", nil - } - - // Compute the container-dependency graph for the Pod. - containers, err := pod.AllContainers() - if err != nil { - return "", err - } - if len(containers) == 0 { - return "", fmt.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name()) - } - graph, err := libpod.BuildContainerGraph(containers) - if err != nil { - return "", err - } - - // Traverse the dependency graph and create systemdgen.ContainerInfo's for - // each container. - containerInfos := []*generate.ContainerInfo{podInfo} - for ctr, dependencies := range graph.DependencyMap() { - // Skip the infra container as we already generated it. 
- if ctr.ID() == infraID { - continue - } - ctrInfo, _, err := r.generateSystemdgenContainerInfo(c, ctr.ID(), nil) - if err != nil { - return "", err - } - // Now add the container's dependencies and at the container as a - // required service of the infra container. - for _, dep := range dependencies { - if dep.ID() == infraID { - ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName) - } else { - _, serviceName := generateServiceName(c, dep, nil) - ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName) - } - } - podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName) - containerInfos = append(containerInfos, ctrInfo) - } - - // Now generate the systemd service for all containers. - builder := strings.Builder{} - for i, info := range containerInfos { - if i > 0 { - builder.WriteByte('\n') - } - out, err := generate.CreateContainerSystemdUnit(info, opts) - if err != nil { - return "", err - } - builder.WriteString(out) - } - - return builder.String(), nil -} - -// GetNamespaces returns namespace information about a container for PS -func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared.Namespace { - return shared.GetNamespaces(container.Pid) -} - -// Commit creates a local image from a container -func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) { - var ( - writer io.Writer - mimeType string - ) - switch c.Format { - case "oci": - mimeType = buildah.OCIv1ImageManifest - if c.Flag("message").Changed { - return "", errors.Errorf("messages are only compatible with the docker image format (-f docker)") - } - case "docker": - mimeType = manifest.DockerV2Schema2MediaType - default: - return "", errors.Errorf("unrecognized image format %q", c.Format) - } - if !c.Quiet { - writer = os.Stderr - } - ctr, err := r.Runtime.LookupContainer(container) - if err != nil { - return "", errors.Wrapf(err, "error looking up container %q", container) - } - - rtc, err := r.Runtime.GetConfig() - if err != nil { - return "", err - } - - sc := image.GetSystemContext(rtc.Engine.SignaturePolicyPath, "", false) - coptions := buildah.CommitOptions{ - SignaturePolicyPath: rtc.Engine.SignaturePolicyPath, - ReportWriter: writer, - SystemContext: sc, - PreferredManifestType: mimeType, - } - options := libpod.ContainerCommitOptions{ - CommitOptions: coptions, - Pause: c.Pause, - IncludeVolumes: c.IncludeVolumes, - Message: c.Message, - Changes: c.Change, - Author: c.Author, - } - newImage, err := ctr.Commit(ctx, imageName, options) - if err != nil { - return "", err - } - return newImage.ID(), nil -} diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go deleted file mode 100644 index 777605896..000000000 --- a/pkg/adapter/containers_remote.go +++ /dev/null @@ -1,1139 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "os" - "strconv" - "syscall" - "time" - - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/logs" - envLib "github.com/containers/libpod/pkg/env" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/pkg/varlinkapi/virtwriter" - "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/docker/docker/pkg/term" - 
"github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" - "golang.org/x/crypto/ssh/terminal" - "k8s.io/client-go/tools/remotecommand" -) - -// Inspect returns an inspect struct from varlink -func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) { - reply, err := iopodman.ContainerInspectData().Call(c.Runtime.Conn, c.ID(), size) - if err != nil { - return nil, err - } - data := define.InspectContainerData{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, err -} - -// ID returns the ID of the container -func (c *Container) ID() string { - return c.config.ID -} - -// Restart a single container -func (c *Container) Restart(timeout int64) error { - _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout) - return err -} - -// Pause a container -func (c *Container) Pause() error { - _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID()) - return err -} - -// Unpause a container -func (c *Container) Unpause() error { - _, err := iopodman.UnpauseContainer().Call(c.Runtime.Conn, c.ID()) - return err -} - -func (c *Container) PortMappings() ([]ocicni.PortMapping, error) { - // First check if the container belongs to a network namespace (like a pod) - // Taken from libpod portmappings() - if len(c.config.NetNsCtr) > 0 { - netNsCtr, err := c.Runtime.LookupContainer(c.config.NetNsCtr) - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID()) - } - return netNsCtr.PortMappings() - } - return c.config.PortMappings, nil -} - -// Config returns a container config -func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig { - // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer - // further looking into it for after devconf. - // The libpod function for this has no errors so we are kind of in a tough - // spot here. Logging the errors for now. - reply, err := iopodman.ContainerConfig().Call(r.Conn, name) - if err != nil { - logrus.Error("call to container.config failed") - } - data := libpod.ContainerConfig{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - logrus.Error("failed to unmarshal container inspect data") - } - return &data - -} - -// ContainerState returns the "state" of the container. -func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { // no-lint - reply, err := iopodman.ContainerStateData().Call(r.Conn, name) - if err != nil { - return nil, err - } - data := libpod.ContainerState{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, err - -} - -// Spec obtains the container spec. 
-func (r *LocalRuntime) Spec(name string) (*specs.Spec, error) { - reply, err := iopodman.Spec().Call(r.Conn, name) - if err != nil { - return nil, err - } - data := specs.Spec{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, nil -} - -// LookupContainers is a wrapper for LookupContainer -func (r *LocalRuntime) LookupContainers(idsOrNames []string) ([]*Container, error) { - var containers []*Container - for _, name := range idsOrNames { - ctr, err := r.LookupContainer(name) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - return containers, nil -} - -// LookupContainer gets basic information about container over a varlink -// connection and then translates it to a *Container -func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) { - state, err := r.ContainerState(idOrName) - if err != nil { - return nil, err - } - config := r.Config(idOrName) - return &Container{ - remoteContainer{ - r, - config, - state, - }, - }, nil -} - -// GetAllContainers returns all containers in a slice -func (r *LocalRuntime) GetAllContainers() ([]*Container, error) { - var containers []*Container - ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{}) - if err != nil { - return nil, err - } - for _, ctr := range ctrs { - container, err := r.LookupContainer(ctr) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) { - var containers []*Container - ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters) - if err != nil { - return nil, err - } - // This is not performance savvy; if this turns out to be a problematic series of lookups, we need to - // create a new endpoint to speed things up - for _, ctr := range ctrs { - container, err := r.LookupContainer(ctr.Id) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -func (r *LocalRuntime) GetLatestContainer() (*Container, error) { - reply, err := iopodman.GetContainersByContext().Call(r.Conn, false, true, nil) - if err != nil { - return nil, err - } - if len(reply) > 0 { - return r.LookupContainer(reply[0]) - } - return nil, errors.New("no containers exist") -} - -// GetArtifact returns a container's artifacts -func (c *Container) GetArtifact(name string) ([]byte, error) { - var data []byte - reply, err := iopodman.ContainerArtifacts().Call(c.Runtime.Conn, c.ID(), name) - if err != nil { - return nil, err - } - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return data, err -} - -// Config returns a container's Config ... same as ctr.Config() -func (c *Container) Config() *libpod.ContainerConfig { - if c.config != nil { - return c.config - } - return c.Runtime.Config(c.ID()) -} - -// Name returns the name of the container -func (c *Container) Name() string { - return c.config.Name -} - -// StopContainers stops requested containers using varlink. 
-// Returns the list of stopped container ids, map of failed to stop container ids + errors, or any non-container error -func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, TranslateError(err) - } - - for _, id := range ids { - if _, err := iopodman.StopContainer().Call(r.Conn, id, int64(cli.Timeout)); err != nil { - transError := TranslateError(err) - if errors.Cause(transError) == define.ErrCtrStopped { - ok = append(ok, id) - continue - } - if errors.Cause(transError) == define.ErrCtrStateInvalid && cli.All { - ok = append(ok, id) - continue - } - failures[id] = err - } else { - // We should be using ID here because in varlink, only successful returns - // include the string id - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// InitContainers initializes container(s) based on Varlink. -// It returns a list of successful ID(s), a map of failed container ID to error, -// or an error if a more general error occurred. -func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, nil, err - } - - for _, id := range ids { - initialized, err := iopodman.InitContainer().Call(r.Conn, id) - if err != nil { - if cli.All { - switch err.(type) { - case *iopodman.InvalidState: - ok = append(ok, initialized) - default: - failures[id] = err - } - } else { - failures[id] = err - } - } else { - ok = append(ok, initialized) - } - } - return ok, failures, nil -} - -// KillContainers sends signal to container(s) based on varlink. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, err - } - - for _, id := range ids { - killed, err := iopodman.KillContainer().Call(r.Conn, id, int64(signal)) - if err != nil { - failures[id] = err - } else { - ok = append(ok, killed) - } - } - return ok, failures, nil -} - -// RemoveContainer removes container(s) based on varlink inputs. -func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - // Failed to get containers. 
If force is specified, get the containers ID - // and evict them - if !cli.Force { - return nil, nil, TranslateError(err) - } - - for _, ctr := range cli.InputArgs { - logrus.Debugf("Evicting container %q", ctr) - id, err := iopodman.EvictContainer().Call(r.Conn, ctr, cli.Volumes) - if err != nil { - failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id) - continue - } - ok = append(ok, id) - } - return ok, failures, nil - } - - for _, id := range ids { - _, err := iopodman.RemoveContainer().Call(r.Conn, id, cli.Force, cli.Volumes) - if err != nil { - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// UmountRootFilesystems umounts container(s) root filesystems based on varlink inputs -func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) { - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, nil, err - } - - var ( - ok = []string{} - failures = map[string]error{} - ) - - for _, id := range ids { - err := iopodman.UnmountContainer().Call(r.Conn, id, cli.Force) - if err != nil { - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// WaitOnContainers waits for all given container(s) to stop. -// interval is currently ignored. -func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, false, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, err - } - - for _, id := range ids { - stopped, err := iopodman.WaitContainer().Call(r.Conn, id, int64(interval)) - if err != nil { - failures[id] = err - } else { - ok = append(ok, strconv.FormatInt(stopped, 10)) - } - } - return ok, failures, nil -} - -// BatchContainerOp is wrapper func to mimic shared's function with a similar name meant for libpod -func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) { - // TODO If pod ps ever shows container's sizes, re-enable this code; otherwise it isn't needed - // and would be a perf hit - // data, err := ctr.Inspect(true) - // if err != nil { - // return shared.BatchContainerStruct{}, err - // } - // - // size := new(shared.ContainerSize) - // size.RootFsSize = data.SizeRootFs - // size.RwSize = data.SizeRw - - bcs := shared.BatchContainerStruct{ - ConConfig: ctr.config, - ConState: ctr.state.State, - ExitCode: ctr.state.ExitCode, - Pid: ctr.state.PID, - StartedTime: ctr.state.StartedTime, - ExitedTime: ctr.state.FinishedTime, - // Size: size, - } - return bcs, nil -} - -// Log one or more containers over a varlink connection -func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error { - // GetContainersLogs - reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), c.Tail, c.Timestamps) - if err != nil { - return errors.Wrapf(err, "failed to get container logs") - } - if len(c.InputArgs) > 1 { - options.Multi = true - } - for { - log, flags, err := reply() - if err != nil { - return err - } - if log.Time == "" && log.Msg == "" { - // We got a blank log line which can signal end of stream - break - } - lTime, err := time.Parse(time.RFC3339Nano, log.Time) - if err != 
nil { - return errors.Wrapf(err, "unable to parse time of log %s", log.Time) - } - logLine := logs.LogLine{ - Device: log.Device, - ParseLogType: log.ParseLogType, - Time: lTime, - Msg: log.Msg, - CID: log.Cid, - } - fmt.Println(logLine.String(options)) - if flags&varlink.Continues == 0 { - break - } - } - return nil -} - -// CreateContainer creates a container from the cli over varlink -func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, true) - return iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink()) -} - -// Run creates a container overvarlink and then starts it -func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) { - // TODO the exit codes for run need to be figured out for remote connections - results := shared.NewIntermediateLayer(&c.PodmanCommand, true) - cid, err := iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink()) - if err != nil { - return exitCode, err - } - if c.Bool("detach") { - if _, err := iopodman.StartContainer().Call(r.Conn, cid); err != nil { - return exitCode, err - } - fmt.Println(cid) - return 0, nil - } - inputStream := os.Stdin - // If -i is not set, clear stdin - if !c.Bool("interactive") { - inputStream = nil - } - exitChan, errChan, err := r.attach(ctx, inputStream, os.Stdout, cid, true, c.String("detach-keys")) - if err != nil { - return exitCode, err - } - exitCode = <-exitChan - finalError := <-errChan - return exitCode, finalError -} - -func ReadExitFile(runtimeTmp, ctrID string) (int, error) { - return 0, define.ErrNotImplemented -} - -// Ps lists containers based on criteria from user -func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) { - var psContainers []shared.PsContainerOutput - last := int64(c.Last) - PsOpts := iopodman.PsOpts{ - All: c.All, - Filters: &c.Filter, - Last: &last, - Latest: &c.Latest, - NoTrunc: &c.NoTrunct, - Pod: &c.Pod, - Quiet: &c.Quiet, - Size: &c.Size, - Sort: &c.Sort, - Sync: &c.Sync, - } - containers, err := iopodman.Ps().Call(r.Conn, PsOpts) - if err != nil { - return nil, err - } - for _, ctr := range containers { - createdAt, err := time.Parse(time.RFC3339Nano, ctr.CreatedAt) - if err != nil { - return nil, err - } - exitedAt, err := time.Parse(time.RFC3339Nano, ctr.ExitedAt) - if err != nil { - return nil, err - } - startedAt, err := time.Parse(time.RFC3339Nano, ctr.StartedAt) - if err != nil { - return nil, err - } - containerSize := shared.ContainerSize{ - RootFsSize: ctr.RootFsSize, - RwSize: ctr.RwSize, - } - state, err := define.StringToContainerStatus(ctr.State) - if err != nil { - return nil, err - } - psc := shared.PsContainerOutput{ - ID: ctr.Id, - Image: ctr.Image, - Command: ctr.Command, - Created: ctr.Created, - Ports: ctr.Ports, - Names: ctr.Names, - IsInfra: ctr.IsInfra, - Status: ctr.Status, - State: state, - Pid: int(ctr.PidNum), - Size: &containerSize, - Pod: ctr.Pod, - CreatedAt: createdAt, - ExitedAt: exitedAt, - StartedAt: startedAt, - Labels: ctr.Labels, - PID: ctr.NsPid, - Cgroup: ctr.Cgroup, - IPC: ctr.Ipc, - MNT: ctr.Mnt, - NET: ctr.Net, - PIDNS: ctr.PidNs, - User: ctr.User, - UTS: ctr.Uts, - Mounts: ctr.Mounts, - } - psContainers = append(psContainers, psc) - } - return psContainers, nil -} - -// Attach to a remote terminal -func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error { - ctr, err := r.LookupContainer(c.InputArgs[0]) - if 
err != nil { - return nil - } - if ctr.state.State != define.ContainerStateRunning { - return errors.New("you can only attach to running containers") - } - inputStream := os.Stdin - if c.NoStdin { - inputStream, err = os.Open(os.DevNull) - if err != nil { - return err - } - } - _, errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false, c.DetachKeys) - if err != nil { - return err - } - return <-errChan -} - -// Checkpoint one or more containers -func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error { - if c.Export != "" { - return errors.New("the remote client does not support exporting checkpoints") - } - if c.IgnoreRootfs { - return errors.New("the remote client does not support --ignore-rootfs") - } - - var lastError error - ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - return err - } - if c.All { - // We don't have a great way to get all the running containers, so need to get all and then - // check status on them bc checkpoint considers checkpointing a stopped container an error - var runningIds []string - for _, id := range ids { - ctr, err := r.LookupContainer(id) - if err != nil { - return err - } - if ctr.state.State == define.ContainerStateRunning { - runningIds = append(runningIds, id) - } - } - ids = runningIds - } - - for _, id := range ids { - if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, c.Keep, c.Keep, c.TcpEstablished); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to checkpoint container %v", id) - } else { - fmt.Println(id) - } - } - return lastError -} - -// Restore one or more containers -func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error { - if c.Import != "" { - return errors.New("the remote client does not support importing checkpoints") - } - if c.IgnoreRootfs { - return errors.New("the remote client does not support --ignore-rootfs") - } - - var lastError error - ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - return err - } - if c.All { - // We don't have a great way to get all the exited containers, so need to get all and then - // check status on them bc checkpoint considers restoring a running container an error - var exitedIDs []string - for _, id := range ids { - ctr, err := r.LookupContainer(id) - if err != nil { - return err - } - if ctr.state.State != define.ContainerStateRunning { - exitedIDs = append(exitedIDs, id) - } - } - ids = exitedIDs - } - - for _, id := range ids { - if _, err := iopodman.ContainerRestore().Call(r.Conn, id, c.Keep, c.TcpEstablished); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to restore container %v", id) - } else { - fmt.Println(id) - } - } - return lastError -} - -// Start starts an already created container -func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) { - var ( - finalErr error - exitCode = define.ExecErrorCodeGeneric - ) - // TODO Figure out how to deal with exit codes - inputStream := os.Stdin - if !c.Interactive { - inputStream = nil - } - - containerIDs, err := iopodman.GetContainersByContext().Call(r.Conn, false, c.Latest, c.InputArgs) - if err != nil { - return exitCode, err - } - if len(containerIDs) < 1 { - return exitCode, errors.New("failed to find containers to start") - } - // start.go makes sure that 
if attach, there can be only one ctr - if c.Attach { - exitChan, errChan, err := r.attach(ctx, inputStream, os.Stdout, containerIDs[0], true, c.DetachKeys) - if err != nil { - return exitCode, nil - } - exitCode := <-exitChan - err = <-errChan - return exitCode, err - } - - // TODO the notion of starting a pod container and its deps still needs to be worked through - // Everything else is detached - for _, cid := range containerIDs { - reply, err := iopodman.StartContainer().Call(r.Conn, cid) - if err != nil { - if finalErr != nil { - fmt.Println(err) - } - finalErr = err - } else { - fmt.Println(reply) - } - } - return exitCode, finalErr -} - -func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool, detachKeys string) (chan int, chan error, error) { - var ( - oldTermState *term.State - ) - spec, err := r.Spec(cid) - if err != nil { - return nil, nil, err - } - resize := make(chan remotecommand.TerminalSize, 5) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && spec.Process.Terminal { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return nil, nil, err - } - defer cancel() - defer restoreTerminal(oldTermState) // nolint: errcheck - - logrus.SetFormatter(&RawTtyFormatter{}) - term.SetRawTerminal(os.Stdin.Fd()) // nolint: errcheck - } - - reply, err := iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, detachKeys, start) - if err != nil { - restoreTerminal(oldTermState) // nolint: errcheck - return nil, nil, err - } - - // See if the server accepts the upgraded connection or returns an error - _, err = reply() - - if err != nil { - restoreTerminal(oldTermState) // nolint: errcheck - return nil, nil, err - } - - ecChan := make(chan int, 1) - errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, stdin, stdout, oldTermState, resize, ecChan) - return ecChan, errChan, nil -} - -// PauseContainers pauses container(s) based on CLI inputs. 
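PauseContainers below, like most of the batch helpers in this file, returns a triple: the IDs that succeeded, a map of per-container failures, and any overall error. The CLI layer is then expected to print the successes and report the failures one by one. A minimal sketch of how such a result is typically consumed (illustrative only; it assumes the surrounding adapter context for r, ctx and cli, plus the standard fmt and os packages):

    ok, failures, err := r.PauseContainers(ctx, cli)
    if err != nil {
        return err
    }
    for _, id := range ok {
        fmt.Println(id)
    }
    for id, pauseErr := range failures {
        fmt.Fprintf(os.Stderr, "failed to pause %s: %v\n", id, pauseErr)
    }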
-func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) { - var ( - ok []string - failures = map[string]error{} - ctrs []*Container - err error - ) - - if cli.All { - filters := []string{define.ContainerStateRunning.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - } else { - ctrs, err = r.LookupContainers(cli.InputArgs) - } - if err != nil { - return ok, failures, err - } - - for _, c := range ctrs { - c := c - err := c.Pause() - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// UnpauseContainers unpauses containers based on input -func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("unpause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - filters := []string{define.ContainerStatePaused.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - } else { - ctrs, err = r.LookupContainers(cli.InputArgs) - } - if err != nil { - return ok, failures, err - } - for _, c := range ctrs { - c := c - err := c.Unpause() - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Restart restarts a container over varlink -func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { - var ( - containers []*Container - restartContainers []*Container - err error - ok = []string{} - failures = map[string]error{} - ) - useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed - inputTimeout := c.Timeout - - if c.Latest { // nolint: gocritic - lastCtr, err := r.GetLatestContainer() - if err != nil { - return nil, nil, errors.Wrapf(err, "unable to get latest container") - } - restartContainers = append(restartContainers, lastCtr) - } else if c.Running { - containers, err = r.LookupContainersWithStatus([]string{define.ContainerStateRunning.String()}) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) - } else if c.All { - containers, err = r.GetAllContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) 
- } else { - for _, id := range c.InputArgs { - ctr, err := r.LookupContainer(id) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, ctr) - } - } - - for _, c := range restartContainers { - c := c - timeout := c.config.StopTimeout - if useTimeout { - timeout = inputTimeout - } - err := c.Restart(int64(timeout)) - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Top display the running processes of a container -func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) { - var ( - ctr *Container - err error - descriptors []string - ) - if cli.Latest { - ctr, err = r.GetLatestContainer() - descriptors = cli.InputArgs - } else { - ctr, err = r.LookupContainer(cli.InputArgs[0]) - descriptors = cli.InputArgs[1:] - } - if err != nil { - return nil, err - } - return iopodman.Top().Call(r.Conn, ctr.ID(), descriptors) -} - -// Prune removes stopped containers -func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, filter []string) ([]string, map[string]error, error) { - - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*Container - err error - ) - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - filters := []string{define.ContainerStateExited.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - if err != nil { - return ok, failures, err - } - for _, c := range ctrs { - c := c - _, err := iopodman.RemoveContainer().Call(r.Conn, c.ID(), false, false) - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Cleanup any leftovers bits of stopped containers -func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) { - return nil, nil, errors.New("container cleanup not supported for remote clients") -} - -// Port displays port information about existing containers -func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { - var ( - containers []*Container - err error - ) - // This one is a bit odd because when all is used, we only use running containers. 
- if !c.All { - containers, err = r.GetContainersByContext(false, c.Latest, c.InputArgs) - } else { - // we need to only use running containers if all - filters := []string{define.ContainerStateRunning.String()} - containers, err = r.LookupContainersWithStatus(filters) - } - if err != nil { - return nil, err - } - return containers, nil -} - -// GenerateSystemd creates a systemd until for a container -func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - return "", errors.New("systemd generation not supported for remote clients") -} - -// GetNamespaces returns namespace information about a container for PS -func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared.Namespace { - ns := shared.Namespace{ - PID: container.PID, - Cgroup: container.Cgroup, - IPC: container.IPC, - MNT: container.MNT, - NET: container.NET, - PIDNS: container.PIDNS, - User: container.User, - UTS: container.UTS, - } - return &ns -} - -// Commit creates a local image from a container -func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) { - var iid string - reply, err := iopodman.Commit().Send(r.Conn, varlink.More, container, imageName, c.Change, c.Author, c.Message, c.Pause, c.Format) - if err != nil { - return "", err - } - for { - responses, flags, err := reply() - if err != nil { - return "", err - } - for _, line := range responses.Logs { - fmt.Fprintln(os.Stderr, line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - return iid, nil -} - -// ExecContainer executes a command in the container -func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) { - var ( - oldTermState *term.State - ec = define.ExecErrorCodeGeneric - ) - // default invalid command exit code - // Validate given environment variables - cliEnv, err := envLib.ParseSlice(cli.Env) - if err != nil { - return 0, errors.Wrap(err, "error parsing environment variables") - } - envs := envLib.Slice(cliEnv) - - resize := make(chan remotecommand.TerminalSize, 5) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. 
If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && cli.Tty { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return ec, err - } - defer cancel() - defer restoreTerminal(oldTermState) // nolint: errcheck - - logrus.SetFormatter(&RawTtyFormatter{}) - term.SetRawTerminal(os.Stdin.Fd()) // nolint: errcheck - } - - opts := iopodman.ExecOpts{ - Name: cli.InputArgs[0], - Tty: cli.Tty, - Privileged: cli.Privileged, - Cmd: cli.InputArgs[1:], - User: &cli.User, - Workdir: &cli.Workdir, - Env: &envs, - DetachKeys: &cli.DetachKeys, - } - - inputStream := os.Stdin - if !cli.Interactive { - inputStream = nil - } - - reply, err := iopodman.ExecContainer().Send(r.Conn, varlink.Upgrade, opts) - if err != nil { - return ec, errors.Wrapf(err, "Exec failed to contact service for %s", cli.InputArgs) - } - - _, err = reply() - if err != nil { - return ec, errors.Wrapf(err, "Exec operation failed for %s", cli.InputArgs) - } - ecChan := make(chan int, 1) - errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, inputStream, os.Stdout, oldTermState, resize, ecChan) - - ec = <-ecChan - err = <-errChan - - return ec, err -} - -func configureVarlinkAttachStdio(reader *bufio.Reader, writer *bufio.Writer, stdin *os.File, stdout *os.File, oldTermState *term.State, resize chan remotecommand.TerminalSize, ecChan chan int) chan error { // nolint: interfacer - errChan := make(chan error, 1) - // These are the special writers that encode input from the client. - varlinkStdinWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.ToStdin) - varlinkResizeWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.TerminalResize) - varlinkHangupWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.HangUpFromClient) - - go func() { - // Read from the wire and direct to stdout or stderr - err := virtwriter.Reader(reader, stdout, os.Stderr, nil, nil, ecChan) - defer restoreTerminal(oldTermState) // nolint: errcheck - sendGenericError(ecChan) - errChan <- err - }() - - go func() { - for termResize := range resize { - b, err := json.Marshal(termResize) - if err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck,staticcheck - sendGenericError(ecChan) - errChan <- err - } - _, err = varlinkResizeWriter.Write(b) - if err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck,staticcheck - sendGenericError(ecChan) - errChan <- err - } - } - }() - if stdin != nil { - // Takes stdinput and sends it over the wire after being encoded - go func() { - if _, err := io.Copy(varlinkStdinWriter, stdin); err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck - sendGenericError(ecChan) - errChan <- err - } - _, err := varlinkHangupWriter.Write([]byte("EOF")) - if err != nil { - logrus.Errorf("unable to notify server to hangup: %q", err) - } - err = varlinkStdinWriter.Close() - errChan <- err - }() - } - return errChan -} - -func sendGenericError(ecChan chan int) { - if ecChan != nil { - ecChan <- define.ExecErrorCodeGeneric - } -} diff --git a/pkg/adapter/errors.go b/pkg/adapter/errors.go deleted file mode 100644 index 012d01d39..000000000 --- a/pkg/adapter/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/pkg/errors" -) - -// TranslateMapErrors translates the errors a typical podman output struct -// from varlink errors to libpod errors -func 
TranslateMapErrors(failures map[string]error) map[string]error { - for k, v := range failures { - failures[k] = TranslateError(v) - } - return failures -} - -// TranslateError converts a single varlink error to a libpod error -func TranslateError(err error) error { - switch err.(type) { - case *iopodman.ContainerNotFound: - return errors.Wrap(define.ErrNoSuchCtr, err.Error()) - case *iopodman.ErrCtrStopped: - return errors.Wrap(define.ErrCtrStopped, err.Error()) - case *iopodman.InvalidState: - return errors.Wrap(define.ErrCtrStateInvalid, err.Error()) - } - return err -} diff --git a/pkg/adapter/images_remote.go b/pkg/adapter/images_remote.go deleted file mode 100644 index 2df0ffcde..000000000 --- a/pkg/adapter/images_remote.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "context" - "encoding/json" - - "github.com/containers/libpod/pkg/inspect" - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Inspect returns returns an ImageData struct from over a varlink connection -func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error) { - reply, err := iopodman.InspectImage().Call(i.Runtime.Conn, i.ID()) - if err != nil { - return nil, err - } - data := inspect.ImageData{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, nil -} diff --git a/pkg/adapter/info_remote.go b/pkg/adapter/info_remote.go deleted file mode 100644 index 549b01f54..000000000 --- a/pkg/adapter/info_remote.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Info returns information for the host system and its components -func (r RemoteRuntime) Info() (*define.Info, error) { - // TODO the varlink implementation for info should be updated to match the output for regular info - var ( - reply define.Info - ) - - info, err := iopodman.GetInfo().Call(r.Conn) - if err != nil { - return nil, err - } - hostInfo := define.HostInfo{ - Arch: info.Host.Arch, - BuildahVersion: info.Host.Buildah_version, - CPUs: int(info.Host.Cpus), - Distribution: define.DistributionInfo{ - Distribution: info.Host.Distribution.Distribution, - Version: info.Host.Distribution.Version, - }, - EventLogger: info.Host.Eventlogger, - Hostname: info.Host.Hostname, - Kernel: info.Host.Kernel, - MemFree: info.Host.Mem_free, - MemTotal: info.Host.Mem_total, - OS: info.Host.Os, - SwapFree: info.Host.Swap_free, - SwapTotal: info.Host.Swap_total, - Uptime: info.Host.Uptime, - } - storeInfo := define.StoreInfo{ - ContainerStore: define.ContainerStore{ - Number: int(info.Store.Containers), - }, - GraphDriverName: info.Store.Graph_driver_name, - GraphRoot: info.Store.Graph_root, - ImageStore: define.ImageStore{ - Number: int(info.Store.Images), - }, - RunRoot: info.Store.Run_root, - } - reply.Host = &hostInfo - reply.Store = &storeInfo - regs := make(map[string]interface{}) - if len(info.Registries.Search) > 0 { - regs["search"] = info.Registries.Search - } - if len(info.Registries.Blocked) > 0 { - regs["blocked"] = info.Registries.Blocked - } - if len(info.Registries.Insecure) > 0 { - regs["insecure"] = info.Registries.Insecure - } - reply.Registries = regs - return &reply, nil -} diff --git a/pkg/adapter/network.go b/pkg/adapter/network.go deleted file mode 100644 index 577ffe19f..000000000 --- a/pkg/adapter/network.go +++ /dev/null @@ -1,277 +0,0 @@ -// +build !remoteclient - -package adapter - 
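The helpers in this file manage CNI network definitions as .conflist files under the runtime's configured CNI directory, falling back to the package default when no directory is configured. A small sketch of where NetworkCreateBridge further down writes its result (the network name is illustrative, and the path shown is only the usual rootful default; getCNIConfDir is defined just below):

    cniConfDir, err := getCNIConfDir(r) // typically /etc/cni/net.d when running as root
    if err != nil {
        return "", err
    }
    // NetworkCreateBridge marshals the generated plugin list and writes it here.
    confPath := filepath.Join(cniConfDir, "mynet.conflist")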
-import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "text/tabwriter" - - cniversion "github.com/containernetworking/cni/pkg/version" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/pkg/network" - "github.com/containers/libpod/pkg/util" - "github.com/pkg/errors" -) - -func getCNIConfDir(r *LocalRuntime) (string, error) { - config, err := r.GetConfig() - if err != nil { - return "", err - } - configPath := config.Network.NetworkConfigDir - - if len(config.Network.NetworkConfigDir) < 1 { - configPath = network.CNIConfigDir - } - return configPath, nil -} - -// NetworkList displays summary information about CNI networks -func (r *LocalRuntime) NetworkList(cli *cliconfig.NetworkListValues) error { - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return err - } - networks, err := network.LoadCNIConfsFromDir(cniConfigPath) - if err != nil { - return err - } - // quiet means we only print the network names - if cli.Quiet { - for _, cniNetwork := range networks { - fmt.Println(cniNetwork.Name) - } - return nil - } - w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) - if _, err := fmt.Fprintln(w, "NAME\tVERSION\tPLUGINS"); err != nil { - return err - } - for _, cniNetwork := range networks { - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", cniNetwork.Name, cniNetwork.CNIVersion, network.GetCNIPlugins(cniNetwork)); err != nil { - return err - } - } - return w.Flush() -} - -// NetworkInspect displays the raw CNI configuration for one -// or more CNI networks -func (r *LocalRuntime) NetworkInspect(cli *cliconfig.NetworkInspectValues) error { - var ( - rawCNINetworks []map[string]interface{} - ) - for _, name := range cli.InputArgs { - rawList, err := network.InspectNetwork(name) - if err != nil { - return err - } - rawCNINetworks = append(rawCNINetworks, rawList) - } - out, err := json.MarshalIndent(rawCNINetworks, "", "\t") - if err != nil { - return err - } - fmt.Printf("%s\n", out) - return nil -} - -// NetworkRemove deletes one or more CNI networks -func (r *LocalRuntime) NetworkRemove(ctx context.Context, cli *cliconfig.NetworkRmValues) ([]string, map[string]error, error) { - var ( - networkRmSuccesses []string - lastError error - ) - networkRmErrors := make(map[string]error) - - for _, name := range cli.InputArgs { - containers, err := r.GetAllContainers() - if err != nil { - return networkRmSuccesses, networkRmErrors, err - } - // We need to iterate containers looking to see if they belong to the given network - for _, c := range containers { - if util.StringInSlice(name, c.Config().Networks) { - // if user passes force, we nuke containers - if !cli.Force { - // Without the force option, we return an error - return nil, nil, errors.Errorf("%q has associated containers with it. 
Use -f to forcibly delete containers", name) - } - if err := r.RemoveContainer(ctx, c.Container, true, true); err != nil { - return nil, nil, err - } - } - } - if err := network.RemoveNetwork(name); err != nil { - if lastError != nil { - networkRmErrors[name] = lastError - } - lastError = err - } else { - networkRmSuccesses = append(networkRmSuccesses, fmt.Sprintf("Deleted: %s\n", name)) - } - } - return networkRmSuccesses, networkRmErrors, lastError -} - -// NetworkCreateBridge creates a CNI network -func (r *LocalRuntime) NetworkCreateBridge(cli *cliconfig.NetworkCreateValues) (string, error) { - isGateway := true - ipMasq := true - subnet := &cli.Network - ipRange := cli.IPRange - runtimeConfig, err := r.GetConfig() - if err != nil { - return "", err - } - // if range is provided, make sure it is "in" network - if cli.IsSet("subnet") { - // if network is provided, does it conflict with existing CNI or live networks - err = network.ValidateUserNetworkIsAvailable(subnet) - } else { - // if no network is provided, figure out network - subnet, err = network.GetFreeNetwork() - } - if err != nil { - return "", err - } - - gateway := cli.Gateway - if gateway == nil { - // if no gateway is provided, provide it as first ip of network - gateway = network.CalcGatewayIP(subnet) - } - // if network is provided and if gateway is provided, make sure it is "in" network - if cli.IsSet("subnet") && cli.IsSet("gateway") { - if !subnet.Contains(gateway) { - return "", errors.Errorf("gateway %s is not in valid for subnet %s", gateway.String(), subnet.String()) - } - } - if cli.Internal { - isGateway = false - ipMasq = false - } - - // if a range is given, we need to ensure it is "in" the network range. - if cli.IsSet("ip-range") { - if !cli.IsSet("subnet") { - return "", errors.New("you must define a subnet range to define an ip-range") - } - firstIP, err := network.FirstIPInSubnet(&cli.IPRange) - if err != nil { - return "", err - } - lastIP, err := network.LastIPInSubnet(&cli.IPRange) - if err != nil { - return "", err - } - if !subnet.Contains(firstIP) || !subnet.Contains(lastIP) { - return "", errors.Errorf("the ip range %s does not fall within the subnet range %s", cli.IPRange.String(), subnet.String()) - } - } - bridgeDeviceName, err := network.GetFreeDeviceName() - if err != nil { - return "", err - } - // If no name is given, we give the name of the bridge device - name := bridgeDeviceName - if len(cli.InputArgs) > 0 { - name = cli.InputArgs[0] - netNames, err := network.GetNetworkNamesFromFileSystem() - if err != nil { - return "", err - } - if util.StringInSlice(name, netNames) { - return "", errors.Errorf("the network name %s is already used", name) - } - } - - ncList := network.NewNcList(name, cniversion.Current()) - var plugins []network.CNIPlugins - var routes []network.IPAMRoute - - defaultRoute, err := network.NewIPAMDefaultRoute() - if err != nil { - return "", err - } - routes = append(routes, defaultRoute) - ipamConfig, err := network.NewIPAMHostLocalConf(subnet, routes, ipRange, gateway) - if err != nil { - return "", err - } - - // TODO need to iron out the role of isDefaultGW and IPMasq - bridge := network.NewHostLocalBridge(bridgeDeviceName, isGateway, false, ipMasq, ipamConfig) - plugins = append(plugins, bridge) - plugins = append(plugins, network.NewPortMapPlugin()) - plugins = append(plugins, network.NewFirewallPlugin()) - // if we find the dnsname plugin, we add configuration for it - if network.HasDNSNamePlugin(runtimeConfig.Network.CNIPluginDirs) && !cli.DisableDNS { - // Note: 
in the future we might like to allow for dynamic domain names - plugins = append(plugins, network.NewDNSNamePlugin(network.DefaultPodmanDomainName)) - } - ncList["plugins"] = plugins - b, err := json.MarshalIndent(ncList, "", " ") - if err != nil { - return "", err - } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) - err = ioutil.WriteFile(cniPathName, b, 0644) - return cniPathName, err -} - -// NetworkCreateMacVLAN creates a CNI network -func (r *LocalRuntime) NetworkCreateMacVLAN(cli *cliconfig.NetworkCreateValues) (string, error) { - var ( - name string - plugins []network.CNIPlugins - ) - liveNetNames, err := network.GetLiveNetworkNames() - if err != nil { - return "", err - } - // Make sure the host-device exists - if !util.StringInSlice(cli.MacVLAN, liveNetNames) { - return "", errors.Errorf("failed to find network interface %q", cli.MacVLAN) - } - if len(cli.InputArgs) > 0 { - name = cli.InputArgs[0] - netNames, err := network.GetNetworkNamesFromFileSystem() - if err != nil { - return "", err - } - if util.StringInSlice(name, netNames) { - return "", errors.Errorf("the network name %s is already used", name) - } - } - if len(name) < 1 { - name, err = network.GetFreeDeviceName() - if err != nil { - return "", err - } - } - ncList := network.NewNcList(name, cniversion.Current()) - macvlan := network.NewMacVLANPlugin(cli.MacVLAN) - plugins = append(plugins, macvlan) - ncList["plugins"] = plugins - b, err := json.MarshalIndent(ncList, "", " ") - if err != nil { - return "", err - } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) - err = ioutil.WriteFile(cniPathName, b, 0644) - return cniPathName, err -} diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go deleted file mode 100644 index 7c2a84cc7..000000000 --- a/pkg/adapter/pods.go +++ /dev/null @@ -1,1065 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - "strings" - - "github.com/containers/buildah/pkg/parse" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/adapter/shortcuts" - ann "github.com/containers/libpod/pkg/annotations" - envLib "github.com/containers/libpod/pkg/env" - ns "github.com/containers/libpod/pkg/namespaces" - createconfig "github.com/containers/libpod/pkg/spec" - "github.com/containers/libpod/pkg/util" - "github.com/containers/storage" - "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/ghodss/yaml" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" -) - -const ( - // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath - createDirectoryPermission = 0755 - // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath - createFilePermission = 0644 -) - -// PodContainerStats is struct containing an adapter Pod and a libpod -// ContainerStats and is used primarily for outputting pod stats. 
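The struct below pairs a pod with a map of its containers' stats, keyed by container ID, so a stats printer can walk one pod at a time. A rough sketch of how it might be consumed (illustrative; the CPU and MemPerc fields on libpod.ContainerStats are assumed here and do not appear in this diff):

    for ctrID, s := range podStats.ContainerStats {
        fmt.Printf("%-12s  cpu %.2f%%  mem %.2f%%\n", ctrID[:12], s.CPU, s.MemPerc)
    }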
-type PodContainerStats struct { - Pod *Pod - ContainerStats map[string]*libpod.ContainerStats -} - -// PrunePods removes pods -func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - maxWorkers := shared.DefaultPoolSize("rm") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - states := []string{define.PodStateStopped, define.PodStateExited} - if cli.Force { - states = append(states, define.PodStateRunning) - } - - pods, err := r.GetPodsByStatus(states) - if err != nil { - return ok, failures, err - } - if len(pods) < 1 { - return ok, failures, nil - } - - pool := shared.NewPool("pod_prune", maxWorkers, len(pods)) - for _, p := range pods { - p := p - - pool.Add(shared.Job{ - ID: p.ID(), - Fn: func() error { - err := r.Runtime.RemovePod(ctx, p, true, cli.Force) - if err != nil { - logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// RemovePods ... -func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) { - errs = append(errs, err) - return nil, errs - } - - for _, p := range pods { - if err := r.Runtime.RemovePod(ctx, p, true, cli.Force); err != nil { - errs = append(errs, err) - } else { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// GetLatestPod gets the latest pod and wraps it in an adapter pod -func (r *LocalRuntime) GetLatestPod() (*Pod, error) { - pod := Pod{} - p, err := r.Runtime.GetLatestPod() - pod.Pod = p - return &pod, err -} - -// GetPodsWithFilters gets the filtered list of pods based on the filter parameters provided. 
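GetPodsWithFilters below takes the raw filter string from the CLI and hands the parsing off to the shared package before wrapping the results in adapter pods. A sketch of a typical call (the key=value filter shown is the usual pod ps syntax and is only illustrative):

    pods, err := r.GetPodsWithFilters("status=running")
    if err != nil {
        return err
    }
    for _, p := range pods {
        fmt.Println(p.ID())
    }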
-func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { - pods, err := shared.GetPodsWithFilters(r.Runtime, filters) - if err != nil { - return nil, err - } - return r.podstoAdapterPods(pods) -} - -func (r *LocalRuntime) podstoAdapterPods(pod []*libpod.Pod) ([]*Pod, error) { - var pods []*Pod - for _, i := range pod { - - pods = append(pods, &Pod{i}) - } - return pods, nil -} - -// GetAllPods gets all pods and wraps it in an adapter pod -func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { - allPods, err := r.Runtime.GetAllPods() - if err != nil { - return nil, err - } - return r.podstoAdapterPods(allPods) -} - -// LookupPod gets a pod by name or id and wraps it in an adapter pod -func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) { - pod := Pod{} - p, err := r.Runtime.LookupPod(nameOrID) - pod.Pod = p - return &pod, err -} - -// StopPods is a wrapper to libpod to stop pods based on a cli context -func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) { - timeout := -1 - if cli.Flags().Changed("timeout") { - timeout = int(cli.Timeout) - } - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) { - errs = append(errs, err) - return nil, errs - } - - for _, p := range pods { - stopped := true - conErrs, stopErr := p.StopWithTimeout(ctx, true, timeout) - if stopErr != nil { - errs = append(errs, stopErr) - stopped = false - } - if conErrs != nil { - stopped = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if stopped { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// KillPods is a wrapper to libpod to start pods based on the cli context -func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - errs = append(errs, err) - return nil, errs - } - for _, p := range pods { - killed := true - conErrs, killErr := p.Kill(signal) - if killErr != nil { - errs = append(errs, killErr) - killed = false - } - if conErrs != nil { - killed = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if killed { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// StartPods is a wrapper to start pods based on the cli context -func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - errs = append(errs, err) - return nil, errs - } - for _, p := range pods { - started := true - conErrs, startErr := p.Start(ctx) - if startErr != nil { - errs = append(errs, startErr) - started = false - } - if conErrs != nil { - started = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if started { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// CreatePod is a wrapper for libpod and creating a new pod from the cli context -func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) { - var ( - options []libpod.PodCreateOption - err error - ) - - // This needs to be first, as a lot of 
options depend on - // WithInfraContainer() - if cli.Infra { - options = append(options, libpod.WithInfraContainer()) - nsOptions, err := shared.GetNamespaceOptions(strings.Split(cli.Share, ",")) - if err != nil { - return "", err - } - options = append(options, nsOptions...) - } - - if cli.Flag("cgroup-parent").Changed { - options = append(options, libpod.WithPodCgroupParent(cli.CgroupParent)) - } - - if len(labels) != 0 { - options = append(options, libpod.WithPodLabels(labels)) - } - - if cli.Flag("name").Changed { - options = append(options, libpod.WithPodName(cli.Name)) - } - - if cli.Flag("hostname").Changed { - options = append(options, libpod.WithPodHostname(cli.Hostname)) - } - - if cli.Flag("add-host").Changed { - options = append(options, libpod.WithPodHosts(cli.StringSlice("add-host"))) - } - if cli.Flag("dns").Changed { - dns := cli.StringSlice("dns") - foundHost := false - for _, entry := range dns { - if entry == "host" { - foundHost = true - } - } - if foundHost && len(dns) > 1 { - return "", errors.Errorf("cannot set dns=host and still provide other DNS servers") - } - if foundHost { - options = append(options, libpod.WithPodUseImageResolvConf()) - } else { - options = append(options, libpod.WithPodDNS(cli.StringSlice("dns"))) - } - } - if cli.Flag("dns-opt").Changed { - options = append(options, libpod.WithPodDNSOption(cli.StringSlice("dns-opt"))) - } - if cli.Flag("dns-search").Changed { - options = append(options, libpod.WithPodDNSSearch(cli.StringSlice("dns-search"))) - } - if cli.Flag("ip").Changed { - ip := net.ParseIP(cli.String("ip")) - if ip == nil { - return "", errors.Errorf("invalid IP address %q passed to --ip", cli.String("ip")) - } - - options = append(options, libpod.WithPodStaticIP(ip)) - } - if cli.Flag("mac-address").Changed { - mac, err := net.ParseMAC(cli.String("mac-address")) - if err != nil { - return "", errors.Wrapf(err, "invalid MAC address %q passed to --mac-address", cli.String("mac-address")) - } - - options = append(options, libpod.WithPodStaticMAC(mac)) - } - if cli.Flag("network").Changed { - netValue := cli.String("network") - switch strings.ToLower(netValue) { - case "bridge": - // Do nothing. - // TODO: Maybe this should be split between slirp and - // bridge? Better to wait until someone asks... - logrus.Debugf("Pod using default network mode") - case "host": - logrus.Debugf("Pod will use host networking") - options = append(options, libpod.WithPodHostNetwork()) - case "": - return "", errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks or host") - default: - // We'll assume this is a comma-separated list of CNI - // networks. - networks := strings.Split(netValue, ",") - logrus.Debugf("Pod joining CNI networks: %v", networks) - options = append(options, libpod.WithPodNetworks(networks)) - } - } - if cli.Flag("no-hosts").Changed { - if cli.Bool("no-hosts") { - options = append(options, libpod.WithPodUseImageHosts()) - } - } - - publish := cli.StringSlice("publish") - if len(publish) > 0 { - portBindings, err := shared.CreatePortBindings(publish) - if err != nil { - return "", err - } - options = append(options, libpod.WithInfraContainerPorts(portBindings)) - - } - // always have containers use pod cgroups - // User Opt out is not yet supported - options = append(options, libpod.WithPodCgroups()) - - pod, err := r.NewPod(ctx, options...) 
- if err != nil { - return "", err - } - return pod.ID(), nil -} - -// GetPodStatus is a wrapper to get the status of a local libpod pod -func (p *Pod) GetPodStatus() (string, error) { - return shared.GetPodStatus(p.Pod) -} - -// BatchContainerOp is a wrapper for the shared function of the same name -func BatchContainerOp(ctr *libpod.Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) { - return shared.BatchContainerOp(ctr, opts) -} - -// PausePods is a wrapper for pausing pods via libpod -func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) { - var ( - pauseIDs []string - pauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - pauseErrors = append(pauseErrors, err) - return nil, containerErrors, pauseErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Pause() - if err != nil { - pauseErrors = append(pauseErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - pauseIDs = append(pauseIDs, pod.ID()) - - } - return pauseIDs, containerErrors, pauseErrors -} - -// UnpausePods is a wrapper for unpausing pods via libpod -func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) { - var ( - unpauseIDs []string - unpauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - return nil, containerErrors, unpauseErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Unpause() - if err != nil { - unpauseErrors = append(unpauseErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - unpauseIDs = append(unpauseIDs, pod.ID()) - - } - return unpauseIDs, containerErrors, unpauseErrors -} - -// RestartPods is a wrapper to restart pods via libpod -func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) { - var ( - restartIDs []string - restartErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - restartErrors = append(restartErrors, err) - return nil, containerErrors, restartErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Restart(ctx) - if err != nil { - restartErrors = append(restartErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - restartIDs = append(restartIDs, pod.ID()) - - } - return restartIDs, containerErrors, restartErrors - -} - -// PodTop is a wrapper function to call GetPodPidInformation in libpod and return its results -// for output -func (r *LocalRuntime) PodTop(c *cliconfig.PodTopValues, descriptors []string) ([]string, error) { - var ( - pod *Pod - err error - ) - - if c.Latest { - pod, err = r.GetLatestPod() - } else { - pod, err = r.LookupPod(c.InputArgs[0]) - } - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup requested container") - } - podStatus, err := pod.GetPodStatus() - if err != nil { - return nil, errors.Wrapf(err, "unable to get status for pod %s", pod.ID()) - } - if podStatus != "Running" { - return nil, errors.Errorf("pod top can 
only be used on pods with at least one running container") - } - return pod.GetPodPidInformation(descriptors) -} - -// GetStatPods returns pods for use in pod stats -func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error) { - var ( - adapterPods []*Pod - pods []*libpod.Pod - err error - ) - - if len(c.InputArgs) > 0 || c.Latest || c.All { - pods, err = shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - } else { - pods, err = r.Runtime.GetRunningPods() - } - if err != nil { - return nil, err - } - // convert libpod pods to adapter pods - for _, p := range pods { - adapterPod := Pod{ - p, - } - adapterPods = append(adapterPods, &adapterPod) - } - return adapterPods, nil -} - -// PlayKubeYAML creates pods and containers from a kube YAML file -func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) { - var ( - containers []*libpod.Container - pod *libpod.Pod - podOptions []libpod.PodCreateOption - podYAML v1.Pod - registryCreds *types.DockerAuthConfig - writer io.Writer - ) - - content, err := ioutil.ReadFile(yamlFile) - if err != nil { - return nil, err - } - - if err := yaml.Unmarshal(content, &podYAML); err != nil { - return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile) - } - - if podYAML.Kind != "Pod" { - return nil, errors.Errorf("Invalid YAML kind: %s. Pod is the only supported Kubernetes YAML kind", podYAML.Kind) - } - - // check for name collision between pod and container - podName := podYAML.ObjectMeta.Name - if podName == "" { - return nil, errors.Errorf("pod does not have a name") - } - for _, n := range podYAML.Spec.Containers { - if n.Name == podName { - fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName) - podName = fmt.Sprintf("%s_pod", podName) - } - } - - podOptions = append(podOptions, libpod.WithInfraContainer()) - podOptions = append(podOptions, libpod.WithPodName(podName)) - // TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml - - hostname := podYAML.Spec.Hostname - if hostname == "" { - hostname = podName - } - podOptions = append(podOptions, libpod.WithPodHostname(hostname)) - - if podYAML.Spec.HostNetwork { - podOptions = append(podOptions, libpod.WithPodHostNetwork()) - } - - nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ",")) - if err != nil { - return nil, err - } - podOptions = append(podOptions, nsOptions...) - podPorts := getPodPorts(podYAML.Spec.Containers) - podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts)) - - if c.Flag("network").Changed { - netValue := c.String("network") - switch strings.ToLower(netValue) { - case "bridge", "host": - return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML") - case "": - return nil, errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks") - default: - // We'll assume this is a comma-separated list of CNI - // networks. - networks := strings.Split(netValue, ",") - logrus.Debugf("Pod joining CNI networks: %v", networks) - podOptions = append(podOptions, libpod.WithPodNetworks(networks)) - } - } - - // Create the Pod - pod, err = r.NewPod(ctx, podOptions...) 
- if err != nil { - return nil, err - } - - podInfraID, err := pod.InfraContainerID() - if err != nil { - return nil, err - } - hasUserns := false - if podInfraID != "" { - podCtr, err := r.GetContainer(podInfraID) - if err != nil { - return nil, err - } - mappings, err := podCtr.IDMappings() - if err != nil { - return nil, err - } - hasUserns = len(mappings.UIDMap) > 0 - } - - namespaces := map[string]string{ - // Disabled during code review per mheon - //"pid": fmt.Sprintf("container:%s", podInfraID), - "net": fmt.Sprintf("container:%s", podInfraID), - "ipc": fmt.Sprintf("container:%s", podInfraID), - "uts": fmt.Sprintf("container:%s", podInfraID), - } - if hasUserns { - namespaces["user"] = fmt.Sprintf("container:%s", podInfraID) - } - if !c.Quiet { - writer = os.Stderr - } - - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: c.CertDir, - } - if c.Flag("tls-verify").Changed { - dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify) - } - - // map from name to mount point - volumes := make(map[string]string) - for _, volume := range podYAML.Spec.Volumes { - hostPath := volume.VolumeSource.HostPath - if hostPath == nil { - return nil, errors.Errorf("HostPath is currently the only supported VolumeSource") - } - if hostPath.Type != nil { - switch *hostPath.Type { - case v1.HostPathDirectoryOrCreate: - if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { - if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil { - return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) - } - } - // Label a newly created volume - if err := libpod.LabelVolumePath(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) - } - case v1.HostPathFileOrCreate: - if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { - f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission) - if err != nil { - return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) - } - if err := f.Close(); err != nil { - logrus.Warnf("Error in closing newly created HostPath file: %v", err) - } - } - // unconditionally label a newly created volume - if err := libpod.LabelVolumePath(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) - } - case v1.HostPathDirectory: - case v1.HostPathFile: - case v1.HostPathUnset: - // do nothing here because we will verify the path exists in validateVolumeHostDir - break - default: - return nil, errors.Errorf("Directories are the only supported HostPath type") - } - } - - if err := parse.ValidateVolumeHostDir(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML") - } - volumes[volume.Name] = hostPath.Path - } - - seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations, c.SeccompProfileRoot) - if err != nil { - return nil, err - } - - for _, container := range podYAML.Spec.Containers { - pullPolicy := util.PullImageMissing - if len(container.ImagePullPolicy) > 0 { - pullPolicy, err = util.ValidatePullType(string(container.ImagePullPolicy)) - if err != nil { - return nil, err - } - } - named, err := reference.ParseNormalizedNamed(container.Image) - if err != nil { - return nil, err - } - // In kube, if the image is tagged with latest, it should always pull - if tagged, isTagged := named.(reference.NamedTagged); isTagged { - if tagged.Tag() == 
image.LatestTag { - pullPolicy = util.PullImageAlways - } - } - newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy) - if err != nil { - return nil, err - } - createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths) - if err != nil { - return nil, err - } - ctr, err := shared.CreateContainerFromCreateConfig(r.Runtime, createConfig, ctx, pod) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - - // start the containers - for _, ctr := range containers { - if err := ctr.Start(ctx, true); err != nil { - // Making this a hard failure here to avoid a mess - // the other containers are in created status - return nil, err - } - } - - // We've now successfully converted this YAML into a pod - // print our pod and containers, signifying we succeeded - fmt.Printf("Pod:\n%s\n", pod.ID()) - if len(containers) == 1 { - fmt.Printf("Container:\n") - } - if len(containers) > 1 { - fmt.Printf("Containers:\n") - } - for _, ctr := range containers { - fmt.Println(ctr.ID()) - } - - if err := playcleanup(ctx, r, pod, nil); err != nil { - logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID()) - } - return nil, nil -} - -func playcleanup(ctx context.Context, runtime *LocalRuntime, pod *libpod.Pod, err error) error { - if err != nil && pod != nil { - return runtime.RemovePod(ctx, pod, true, true) - } - return nil -} - -// getPodPorts converts a slice of kube container descriptions to an -// array of ocicni portmapping descriptions usable in libpod -func getPodPorts(containers []v1.Container) []ocicni.PortMapping { - var infraPorts []ocicni.PortMapping - for _, container := range containers { - for _, p := range container.Ports { - if p.HostPort != 0 && p.ContainerPort == 0 { - p.ContainerPort = p.HostPort - } - if p.Protocol == "" { - p.Protocol = "tcp" - } - portBinding := ocicni.PortMapping{ - HostPort: p.HostPort, - ContainerPort: p.ContainerPort, - Protocol: strings.ToLower(string(p.Protocol)), - } - if p.HostIP != "" { - logrus.Debug("HostIP on port bindings is not supported") - } - // only hostPort is utilized in podman context, all container ports - // are accessible inside the shared network namespace - if p.HostPort != 0 { - infraPorts = append(infraPorts, portBinding) - } - - } - } - return infraPorts -} - -func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) { - if containerYAML.SecurityContext == nil { - return - } - if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { - securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem - } - if containerYAML.SecurityContext.Privileged != nil { - securityConfig.Privileged = *containerYAML.SecurityContext.Privileged - } - - if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { - securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation - } - - if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil { - if seopt.User != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) - } - if seopt.Role != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, 
fmt.Sprintf("label=role:%s", seopt.Role)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) - } - if seopt.Type != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) - } - if seopt.Level != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) - } - } - if caps := containerYAML.SecurityContext.Capabilities; caps != nil { - for _, capability := range caps.Add { - securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability)) - } - for _, capability := range caps.Drop { - securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability)) - } - } - if containerYAML.SecurityContext.RunAsUser != nil { - userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser) - } - if containerYAML.SecurityContext.RunAsGroup != nil { - if userConfig.User == "" { - userConfig.User = "0" - } - userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup) - } -} - -// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container -func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) { - var ( - containerConfig createconfig.CreateConfig - pidConfig createconfig.PidConfig - networkConfig createconfig.NetworkConfig - cgroupConfig createconfig.CgroupConfig - utsConfig createconfig.UtsConfig - ipcConfig createconfig.IpcConfig - userConfig createconfig.UserConfig - securityConfig createconfig.SecurityConfig - ) - - // The default for MemorySwappiness is -1, not 0 - containerConfig.Resources.MemorySwappiness = -1 - - containerConfig.Image = containerYAML.Image - containerConfig.ImageID = newImage.ID() - containerConfig.Name = containerYAML.Name - containerConfig.Tty = containerYAML.TTY - - containerConfig.Pod = podID - - imageData, _ := newImage.Inspect(ctx) - - userConfig.User = "0" - if imageData != nil { - userConfig.User = imageData.Config.User - } - - setupSecurityContext(&securityConfig, &userConfig, containerYAML) - - securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name) - - containerConfig.Command = []string{} - if imageData != nil && imageData.Config != nil { - containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...) - } - if len(containerYAML.Command) != 0 { - containerConfig.Command = append(containerConfig.Command, containerYAML.Command...) - } else if imageData != nil && imageData.Config != nil { - containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...) 
- } - if imageData != nil && len(containerConfig.Command) == 0 { - return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name) - } - - containerConfig.UserCommand = containerConfig.Command - - containerConfig.StopSignal = 15 - - containerConfig.WorkDir = "/" - if imageData != nil { - // FIXME, - // we are currently ignoring imageData.Config.ExposedPorts - containerConfig.BuiltinImgVolumes = imageData.Config.Volumes - if imageData.Config.WorkingDir != "" { - containerConfig.WorkDir = imageData.Config.WorkingDir - } - containerConfig.Labels = imageData.Config.Labels - if imageData.Config.StopSignal != "" { - stopSignal, err := util.ParseSignal(imageData.Config.StopSignal) - if err != nil { - return nil, err - } - containerConfig.StopSignal = stopSignal - } - } - - if containerYAML.WorkingDir != "" { - containerConfig.WorkDir = containerYAML.WorkingDir - } - // If the user does not pass in ID mappings, just set to basics - if userConfig.IDMappings == nil { - userConfig.IDMappings = &storage.IDMappingOptions{} - } - - networkConfig.NetMode = ns.NetworkMode(namespaces["net"]) - ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"]) - utsConfig.UtsMode = ns.UTSMode(namespaces["uts"]) - // disabled in code review per mheon - //containerConfig.PidMode = ns.PidMode(namespaces["pid"]) - userConfig.UsernsMode = ns.UsernsMode(namespaces["user"]) - if len(containerConfig.WorkDir) == 0 { - containerConfig.WorkDir = "/" - } - - containerConfig.Pid = pidConfig - containerConfig.Network = networkConfig - containerConfig.Uts = utsConfig - containerConfig.Ipc = ipcConfig - containerConfig.Cgroup = cgroupConfig - containerConfig.User = userConfig - containerConfig.Security = securityConfig - - annotations := make(map[string]string) - if infraID != "" { - annotations[ann.SandboxID] = infraID - annotations[ann.ContainerType] = ann.ContainerTypeContainer - } - containerConfig.Annotations = annotations - - // Environment Variables - envs := map[string]string{} - if imageData != nil { - imageEnv, err := envLib.ParseSlice(imageData.Config.Env) - if err != nil { - return nil, errors.Wrap(err, "error parsing image environment variables") - } - envs = imageEnv - } - for _, e := range containerYAML.Env { - envs[e.Name] = e.Value - } - containerConfig.Env = envs - - for _, volume := range containerYAML.VolumeMounts { - hostPath, exists := volumes[volume.Name] - if !exists { - return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name) - } - if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil { - return nil, errors.Wrapf(err, "error in parsing MountPath") - } - containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath)) - } - return &containerConfig, nil -} - -// kubeSeccompPaths holds information about a pod YAML's seccomp configuration -// it holds both container and pod seccomp paths -type kubeSeccompPaths struct { - containerPaths map[string]string - podPath string -} - -// findForContainer checks whether a container has a seccomp path configured for it -// if not, it returns the podPath, which should always have a value -func (k *kubeSeccompPaths) findForContainer(ctrName string) string { - if path, ok := k.containerPaths[ctrName]; ok { - return path - } - return k.podPath -} - -// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp -// it parses both pod and container 
level -// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s -func initializeSeccompPaths(annotations map[string]string, profileRoot string) (*kubeSeccompPaths, error) { - seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)} - var err error - if annotations != nil { - for annKeyValue, seccomp := range annotations { - // check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/ - prefixAndCtr := strings.Split(annKeyValue, "/") - if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix { - continue - } else if len(prefixAndCtr) != 2 { - // this could be caused by a user inputting either of - // container.seccomp.security.alpha.kubernetes.io{,/} - // both of which are invalid - return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0]) - } - - path, err := verifySeccompPath(seccomp, profileRoot) - if err != nil { - return nil, err - } - seccompPaths.containerPaths[prefixAndCtr[1]] = path - } - - podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey] - if ok { - seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot) - } else { - seccompPaths.podPath, err = libpod.DefaultSeccompPath() - } - if err != nil { - return nil, err - } - } - return seccompPaths, nil -} - -// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path -// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp -func verifySeccompPath(path string, profileRoot string) (string, error) { - switch path { - case v1.DeprecatedSeccompProfileDockerDefault: - fallthrough - case v1.SeccompProfileRuntimeDefault: - return libpod.DefaultSeccompPath() - case "unconfined": - return path, nil - default: - parts := strings.Split(path, "/") - if parts[0] == "localhost" { - return filepath.Join(profileRoot, parts[1]), nil - } - return "", errors.Errorf("invalid seccomp path: %s", path) - } -} diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go deleted file mode 100644 index 4c6eea9a7..000000000 --- a/pkg/adapter/pods_remote.go +++ /dev/null @@ -1,576 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "context" - "encoding/json" - "strings" - "time" - - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/pkg/varlinkapi" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// PodContainerStats is struct containing an adapter Pod and a libpod -// ContainerStats and is used primarily for outputting pod stats. -type PodContainerStats struct { - Pod *Pod - ContainerStats map[string]*libpod.ContainerStats -} - -// RemovePods removes one or more based on the cli context. 
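For reference, the seccomp handling removed above (initializeSeccompPaths / verifySeccompPath) boils down to a small resolution rule. The sketch below restates it using only the standard library; the literal annotation values are assumed to match the k8s constants referenced in the deleted code, and defaultPath stands in for libpod.DefaultSeccompPath().

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // resolveSeccomp mirrors the value handling of the deleted verifySeccompPath:
    // "runtime/default" (or the deprecated "docker/default") falls back to a
    // runtime default, "unconfined" passes through, and "localhost/<name>"
    // resolves to a file under profileRoot. defaultPath is a stand-in for
    // libpod.DefaultSeccompPath().
    func resolveSeccomp(value, profileRoot, defaultPath string) (string, error) {
        switch value {
        case "runtime/default", "docker/default":
            return defaultPath, nil
        case "unconfined":
            return value, nil
        default:
            parts := strings.SplitN(value, "/", 2)
            if len(parts) == 2 && parts[0] == "localhost" {
                return filepath.Join(profileRoot, parts[1]), nil
            }
            return "", fmt.Errorf("invalid seccomp path: %s", value)
        }
    }

    func main() {
        p, _ := resolveSeccomp("localhost/audit.json", "/var/lib/kubelet/seccomp", "/usr/share/containers/seccomp.json")
        fmt.Println(p) // /var/lib/kubelet/seccomp/audit.json
    }
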
-func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) { - var ( - rmErrs []error - rmPods []string - ) - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - rmErrs = append(rmErrs, err) - return nil, rmErrs - } - - for _, p := range podIDs { - reply, err := iopodman.RemovePod().Call(r.Conn, p, cli.Force) - if err != nil { - rmErrs = append(rmErrs, err) - } else { - rmPods = append(rmPods, reply) - } - } - return rmPods, rmErrs -} - -// Inspect looks up a pod by name or id and embeds its data into a remote pod -// object. -func (r *LocalRuntime) Inspect(nameOrID string) (*Pod, error) { - reply, err := iopodman.PodStateData().Call(r.Conn, nameOrID) - if err != nil { - return nil, err - } - data := libpod.PodInspect{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - pod := Pod{} - pod.Runtime = r - pod.config = data.Config - pod.state = data.State - pod.containers = data.Containers - return &pod, nil -} - -// GetLatestPod gets the latest pod and wraps it in an adapter pod -func (r *LocalRuntime) GetLatestPod() (*Pod, error) { - reply, err := iopodman.GetPodsByContext().Call(r.Conn, false, true, nil) - if err != nil { - return nil, err - } - if len(reply) > 0 { - return r.Inspect(reply[0]) - } - return nil, errors.New("no pods exist") -} - -// LookupPod gets a pod by name or ID and wraps it in an adapter pod -func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) { - return r.Inspect(nameOrID) -} - -// Inspect, like libpod pod inspect, returns a libpod.PodInspect object from -// the data of a remotepod data struct -func (p *Pod) Inspect() (*libpod.PodInspect, error) { - config := new(libpod.PodConfig) - if err := libpod.JSONDeepCopy(p.remotepod.config, config); err != nil { - return nil, err - } - inspectData := libpod.PodInspect{ - Config: config, - State: p.remotepod.state, - Containers: p.containers, - } - return &inspectData, nil -} - -// StopPods stops pods based on the cli context from the remote client. 
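RemovePods above, and the StopPods/KillPods/StartPods helpers that follow, share one shape: resolve the target pod IDs from the CLI context, then apply a varlink call per pod while collecting successes and failures separately. A minimal sketch of that shape, with plain functions standing in for the iopodman calls (GetPodsByContext, RemovePod, and so on):

    package main

    import (
        "errors"
        "fmt"
    )

    // actOnPods mirrors the structure of the deleted pod helpers: resolve the
    // target pod IDs, then apply one action per pod, returning the IDs that
    // succeeded alongside every error encountered. resolve and action are
    // placeholders for the varlink calls used in the original code.
    func actOnPods(resolve func() ([]string, error), action func(id string) (string, error)) ([]string, []error) {
        ids, err := resolve()
        if err != nil {
            return nil, []error{err}
        }
        var done []string
        var errs []error
        for _, id := range ids {
            reply, err := action(id)
            if err != nil {
                errs = append(errs, err)
                continue
            }
            done = append(done, reply)
        }
        return done, errs
    }

    func main() {
        ok, errs := actOnPods(
            func() ([]string, error) { return []string{"a1", "b2"}, nil },
            func(id string) (string, error) {
                if id == "b2" {
                    return "", errors.New("pod b2 has running containers")
                }
                return id, nil
            },
        )
        fmt.Println(ok, errs)
    }
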
-func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) { - var ( - stopErrs []error - stopPods []string - ) - var timeout int64 = -1 - if cli.Flags().Changed("timeout") { - timeout = int64(cli.Timeout) - } - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.StopPod().Call(r.Conn, p, timeout) - if err != nil { - stopErrs = append(stopErrs, err) - } else { - stopPods = append(stopPods, podID) - } - } - return stopPods, stopErrs -} - -// KillPods kills pods over varlink for the remoteclient -func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) { - var ( - killErrs []error - killPods []string - ) - - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.KillPod().Call(r.Conn, p, int64(signal)) - if err != nil { - killErrs = append(killErrs, err) - } else { - killPods = append(killPods, podID) - } - } - return killPods, killErrs -} - -// StartPods starts pods for the remote client over varlink -func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) { - var ( - startErrs []error - startPods []string - ) - - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.StartPod().Call(r.Conn, p) - if err != nil { - startErrs = append(startErrs, err) - } else { - startPods = append(startPods, podID) - } - } - return startPods, startErrs -} - -// CreatePod creates a pod for the remote client over a varlink connection -func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) { - var share []string - if cli.Share != "" { - share = strings.Split(cli.Share, ",") - } - pc := iopodman.PodCreate{ - Name: cli.Name, - CgroupParent: cli.CgroupParent, - Labels: labels, - Share: share, - Infra: cli.Infra, - InfraCommand: cli.InfraCommand, - InfraImage: cli.InfraCommand, - Publish: cli.StringSlice("publish"), - } - - return iopodman.CreatePod().Call(r.Conn, pc) -} - -// GetAllPods is a helper function that gets all pods for the remote client -func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { - var pods []*Pod - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, true, false, []string{}) - if err != nil { - return nil, err - } - for _, p := range podIDs { - pod, err := r.LookupPod(p) - if err != nil { - return nil, err - } - pods = append(pods, pod) - } - return pods, nil -} - -// This is a empty implementation stating remoteclient not yet implemented -func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { - return nil, define.ErrNotImplemented -} - -// GetPodsByStatus returns a slice of pods filtered by a libpod status -func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) { - podIDs, err := iopodman.GetPodsByStatus().Call(r.Conn, statuses) - if err != nil { - return nil, err - } - pods := make([]*Pod, 0, len(podIDs)) - for _, p := range podIDs { - pod, err := r.LookupPod(p) - if err != nil { - return nil, err - } - pods = append(pods, pod) - } - return pods, nil -} - -// ID returns the id of a remote pod -func (p 
*Pod) ID() string { - return p.config.ID -} - -// Name returns the name of the remote pod -func (p *Pod) Name() string { - return p.config.Name -} - -// AllContainersByID returns a slice of a pod's container IDs -func (p *Pod) AllContainersByID() ([]string, error) { - var containerIDs []string - for _, ctr := range p.containers { - containerIDs = append(containerIDs, ctr.ID) - } - return containerIDs, nil -} - -// AllContainers returns a pods containers -func (p *Pod) AllContainers() ([]*Container, error) { - var containers []*Container - for _, ctr := range p.containers { - container, err := p.Runtime.LookupContainer(ctr.ID) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -// Status ... -func (p *Pod) Status() (map[string]define.ContainerStatus, error) { - ctrs := make(map[string]define.ContainerStatus) - for _, i := range p.containers { - var status define.ContainerStatus - switch i.State { - case "exited": - status = define.ContainerStateExited - case "stopped": - status = define.ContainerStateStopped - case "running": - status = define.ContainerStateRunning - case "paused": - status = define.ContainerStatePaused - case "created": - status = define.ContainerStateCreated - case "define.red": - status = define.ContainerStateConfigured - default: - status = define.ContainerStateUnknown - } - ctrs[i.ID] = status - } - return ctrs, nil -} - -// GetPodStatus is a wrapper to get the string version of the status -func (p *Pod) GetPodStatus() (string, error) { - ctrStatuses, err := p.Status() - if err != nil { - return "", err - } - return shared.CreatePodStatusResults(ctrStatuses) -} - -// InfraContainerID returns the ID of the infra container in a pod -func (p *Pod) InfraContainerID() (string, error) { - return p.state.InfraContainerID, nil -} - -// CreatedTime returns the time the container was created as a time.Time -func (p *Pod) CreatedTime() time.Time { - return p.config.CreatedTime -} - -// SharesPID .... 
-func (p *Pod) SharesPID() bool { - return p.config.UsePodPID -} - -// SharesIPC returns whether containers in pod -// default to use IPC namespace of first container in pod -func (p *Pod) SharesIPC() bool { - return p.config.UsePodIPC -} - -// SharesNet returns whether containers in pod -// default to use network namespace of first container in pod -func (p *Pod) SharesNet() bool { - return p.config.UsePodNet -} - -// SharesMount returns whether containers in pod -// default to use PID namespace of first container in pod -func (p *Pod) SharesMount() bool { - return p.config.UsePodMount -} - -// SharesUser returns whether containers in pod -// default to use user namespace of first container in pod -func (p *Pod) SharesUser() bool { - return p.config.UsePodUser -} - -// SharesUTS returns whether containers in pod -// default to use UTS namespace of first container in pod -func (p *Pod) SharesUTS() bool { - return p.config.UsePodUTS -} - -// SharesCgroup returns whether containers in the pod will default to this pod's -// cgroup instead of the default libpod parent -func (p *Pod) SharesCgroup() bool { - return p.config.UsePodCgroup -} - -// CgroupParent returns the pod's CGroup parent -func (p *Pod) CgroupParent() string { - return p.config.CgroupParent -} - -// PausePods pauses a pod using varlink and the remote client -func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) { - var ( - pauseIDs []string - pauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - pauseErrors = append(pauseErrors, err) - return nil, containerErrors, pauseErrors - } - for _, pod := range pods { - reply, err := iopodman.PausePod().Call(r.Conn, pod) - if err != nil { - pauseErrors = append(pauseErrors, err) - continue - } - pauseIDs = append(pauseIDs, reply) - } - return pauseIDs, nil, pauseErrors -} - -// UnpausePods unpauses a pod using varlink and the remote client -func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) { - var ( - unpauseIDs []string - unpauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - return nil, containerErrors, unpauseErrors - } - for _, pod := range pods { - reply, err := iopodman.UnpausePod().Call(r.Conn, pod) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - continue - } - unpauseIDs = append(unpauseIDs, reply) - } - return unpauseIDs, nil, unpauseErrors -} - -// RestartPods restarts pods using varlink and the remote client -func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) { - var ( - restartIDs []string - restartErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - restartErrors = append(restartErrors, err) - return nil, containerErrors, restartErrors - } - for _, pod := range pods { - reply, err := iopodman.RestartPod().Call(r.Conn, pod) - if err != nil { - restartErrors = append(restartErrors, err) - continue - } - restartIDs = append(restartIDs, reply) - } - return restartIDs, nil, restartErrors -} - -// PodTop gets top statistics for a pod -func (r *LocalRuntime) PodTop(c *cliconfig.PodTopValues, 
descriptors []string) ([]string, error) { - var ( - latest bool - podName string - ) - if c.Latest { - latest = true - } else { - podName = c.InputArgs[0] - } - return iopodman.TopPod().Call(r.Conn, podName, latest, descriptors) -} - -// GetStatPods returns pods for use in pod stats -func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error) { - var ( - pods []*Pod - err error - podIDs []string - running bool - ) - - if len(c.InputArgs) > 0 || c.Latest || c.All { - podIDs, err = iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - } else { - podIDs, err = iopodman.GetPodsByContext().Call(r.Conn, true, false, []string{}) - running = true - } - if err != nil { - return nil, err - } - for _, p := range podIDs { - pod, err := r.Inspect(p) - if err != nil { - return nil, err - } - if running { - status, err := pod.GetPodStatus() - if err != nil { - // if we cannot get the status of the pod, skip and move on - continue - } - if strings.ToUpper(status) != "RUNNING" { - // if the pod is not running, skip and move on as well - continue - } - } - pods = append(pods, pod) - } - return pods, nil -} - -// GetPodStats returns the stats for each of its containers -func (p *Pod) GetPodStats(previousContainerStats map[string]*libpod.ContainerStats) (map[string]*libpod.ContainerStats, error) { - var ( - ok bool - prevStat *libpod.ContainerStats - ) - newContainerStats := make(map[string]*libpod.ContainerStats) - containers, err := p.AllContainers() - if err != nil { - return nil, err - } - for _, c := range containers { - if prevStat, ok = previousContainerStats[c.ID()]; !ok { - prevStat = &libpod.ContainerStats{ContainerID: c.ID()} - } - cStats := iopodman.ContainerStats{ - Id: prevStat.ContainerID, - Name: prevStat.Name, - Cpu: prevStat.CPU, - Cpu_nano: int64(prevStat.CPUNano), - System_nano: int64(prevStat.SystemNano), - Mem_usage: int64(prevStat.MemUsage), - Mem_limit: int64(prevStat.MemLimit), - Mem_perc: prevStat.MemPerc, - Net_input: int64(prevStat.NetInput), - Net_output: int64(prevStat.NetOutput), - Block_input: int64(prevStat.BlockInput), - Block_output: int64(prevStat.BlockOutput), - Pids: int64(prevStat.PIDs), - } - stats, err := iopodman.GetContainerStatsWithHistory().Call(p.Runtime.Conn, cStats) - if err != nil { - return nil, err - } - newStats := varlinkapi.ContainerStatsToLibpodContainerStats(stats) - // If the container wasn't running, don't include it - // but also suppress the error - if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid { // nolint: govet - return nil, err - } - if err == nil { // nolint: govet - newContainerStats[c.ID()] = &newStats - } - } - return newContainerStats, nil -} - -// RemovePod removes a pod -// If removeCtrs is specified, containers will be removed -// Otherwise, a pod that is not empty will return an error and not be removed -// If force is specified with removeCtrs, all containers will be stopped before -// being removed -// Otherwise, the pod will not be removed if any containers are running -func (r *LocalRuntime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) error { - _, err := iopodman.RemovePod().Call(r.Conn, p.ID(), force) - if err != nil { - return err - } - return nil -} - -// PrunePods... 
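GetPodStats above feeds the previous ContainerStats back to the server so that per-interval rates can be derived from two cumulative samples. The sketch below shows the conventional delta-based CPU-percentage calculation such a previous sample enables; it is illustrative only, not the exact server-side formula.

    package main

    import "fmt"

    // Sample holds the counters relevant to a CPU-percentage calculation,
    // analogous to the CPUNano/SystemNano fields carried in the previous
    // ContainerStats that GetPodStats sends back to the server.
    type Sample struct {
        CPUNano    uint64 // cumulative container CPU time, ns
        SystemNano uint64 // cumulative system CPU time, ns
    }

    // cpuPercent is the usual delta-based computation: the share of system CPU
    // time the container consumed between two samples, scaled by the number of
    // online CPUs. A sketch of the idea, not the original implementation.
    func cpuPercent(prev, cur Sample, onlineCPUs int) float64 {
        cpuDelta := float64(cur.CPUNano - prev.CPUNano)
        sysDelta := float64(cur.SystemNano - prev.SystemNano)
        if cpuDelta <= 0 || sysDelta <= 0 {
            return 0
        }
        return (cpuDelta / sysDelta) * float64(onlineCPUs) * 100
    }

    func main() {
        prev := Sample{CPUNano: 1_000_000_000, SystemNano: 40_000_000_000}
        cur := Sample{CPUNano: 1_500_000_000, SystemNano: 42_000_000_000}
        fmt.Printf("%.1f%%\n", cpuPercent(prev, cur, 4)) // 100.0%
    }
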
-func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - states := []string{define.PodStateStopped, define.PodStateExited} - if cli.Force { - states = append(states, define.PodStateRunning) - } - - ids, err := iopodman.GetPodsByStatus().Call(r.Conn, states) - if err != nil { - return ok, failures, err - } - if len(ids) < 1 { - return ok, failures, nil - } - - for _, id := range ids { - _, err := iopodman.RemovePod().Call(r.Conn, id, cli.Force) - if err != nil { - logrus.Debugf("Failed to remove pod %s: %s", id, err.Error()) - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// PlayKubeYAML creates pods and containers from a kube YAML file -func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) { - return nil, define.ErrNotImplemented -} diff --git a/pkg/adapter/reset.go b/pkg/adapter/reset.go deleted file mode 100644 index 0decc3d15..000000000 --- a/pkg/adapter/reset.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "context" -) - -// Reset the container storage back to initial states. -// Removes all Pods, Containers, Images and Volumes. -func (r *LocalRuntime) Reset() error { - return r.Runtime.Reset(context.TODO()) -} diff --git a/pkg/adapter/reset_remote.go b/pkg/adapter/reset_remote.go deleted file mode 100644 index 284b54a17..000000000 --- a/pkg/adapter/reset_remote.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Info returns information for the host system and its components -func (r RemoteRuntime) Reset() error { - return iopodman.Reset().Call(r.Conn) -} diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go deleted file mode 100644 index 7a181e7e5..000000000 --- a/pkg/adapter/runtime.go +++ /dev/null @@ -1,477 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "bufio" - "context" - "io" - "io/ioutil" - "os" - "text/template" - - "github.com/containers/buildah" - "github.com/containers/buildah/imagebuildah" - "github.com/containers/buildah/pkg/formats" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/libpodruntime" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/rootless" - "github.com/containers/libpod/pkg/util" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" -) - -// LocalRuntime describes a typical libpod runtime -type LocalRuntime struct { - *libpod.Runtime - Remote bool -} - -// ContainerImage ... -type ContainerImage struct { - *image.Image -} - -// Container ... -type Container struct { - *libpod.Container -} - -// Pod encapsulates the libpod.Pod structure, helps with remote vs. local -type Pod struct { - *libpod.Pod -} - -// Volume ... 
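The reset.go / reset_remote.go pair above shows the whole adapter idea in miniature: two LocalRuntime definitions selected by the remoteclient build tag expose the same method set, one backed by libpod directly and one by a varlink client. A simplified sketch of that split, using an explicit interface (the real package relies on build tags rather than an interface) and Reset as the example method:

    package main

    import (
        "errors"
        "fmt"
    )

    // runtime is an illustrative stand-in for the method set cmd/podman relied
    // on; the deleted package provided it via two LocalRuntime definitions
    // chosen by the remoteclient build tag, not via an interface.
    type runtime interface {
        Reset() error
    }

    // localRuntime stands in for the build without the remoteclient tag, which
    // wrapped *libpod.Runtime directly.
    type localRuntime struct{}

    func (localRuntime) Reset() error { return nil }

    // remoteRuntime stands in for the remoteclient build, which forwarded the
    // call over a varlink connection.
    type remoteRuntime struct{ connected bool }

    func (r remoteRuntime) Reset() error {
        if !r.connected {
            return errors.New("not connected to a varlink endpoint")
        }
        return nil
    }

    func main() {
        for _, rt := range []runtime{localRuntime{}, remoteRuntime{}} {
            fmt.Printf("%T: %v\n", rt, rt.Reset())
        }
    }
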
-type Volume struct { - *libpod.Volume -} - -// VolumeFilter is for filtering volumes on the client -type VolumeFilter func(*Volume) bool - -// GetRuntimeNoStore returns a localruntime struct with an embedded runtime but -// without a configured storage. -func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - runtime, err := libpodruntime.GetRuntimeNoStore(ctx, c) - if err != nil { - return nil, err - } - return getRuntime(runtime) -} - -// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it -func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - runtime, err := libpodruntime.GetRuntime(ctx, c) - if err != nil { - return nil, err - } - return getRuntime(runtime) -} - -func getRuntime(runtime *libpod.Runtime) (*LocalRuntime, error) { - return &LocalRuntime{ - Runtime: runtime, - }, nil -} - -// GetFilteredImages returns a slice of images in containerimages that are "filtered" -func (r *LocalRuntime) GetFilteredImages(filters []string, rwOnly bool) ([]*ContainerImage, error) { - images, err := r.ImageRuntime().GetImagesWithFilters(filters) - if err != nil { - return nil, err - } - return r.ImagestoContainerImages(images, rwOnly) -} - -// GetImages returns a slice of images in containerimages -func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) { - return r.getImages(false) -} - -// GetRWImages returns a slice of read/write images in containerimages -func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) { - return r.getImages(true) -} - -func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) { - images, err := r.Runtime.ImageRuntime().GetImages() - if err != nil { - return nil, err - } - return r.ImagestoContainerImages(images, rwOnly) -} - -// ImagestoContainerImages converts the slice of *image.Image to a slice of -// *ContainerImage. ReadOnly images are skipped when rwOnly is set. 
-func (r *LocalRuntime) ImagestoContainerImages(images []*image.Image, rwOnly bool) ([]*ContainerImage, error) { - var containerImages []*ContainerImage - for _, i := range images { - if rwOnly && i.IsReadOnly() { - continue - } - containerImages = append(containerImages, &ContainerImage{i}) - } - return containerImages, nil -} - -// NewImageFromLocal returns a containerimage representation of a image from local storage -func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) { - img, err := r.Runtime.ImageRuntime().NewFromLocal(name) - if err != nil { - return nil, err - } - return &ContainerImage{img}, nil -} - -// ImageTree reutnrs an new image.Tree for the provided `imageOrID` and `whatrequires` flag -func (r *LocalRuntime) ImageTree(imageOrID string, whatRequires bool) (string, error) { - img, err := r.Runtime.ImageRuntime().NewFromLocal(imageOrID) - if err != nil { - return "", err - } - return img.GenerateTree(whatRequires) -} - -// LoadFromArchiveReference calls into local storage to load an image from an archive -func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) { - var containerImages []*ContainerImage - imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer) - if err != nil { - return nil, err - } - for _, i := range imgs { - ci := ContainerImage{i} - containerImages = append(containerImages, &ci) - } - return containerImages, nil -} - -// New calls into local storage to look for an image in local storage or to pull it -func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, label *string, pullType util.PullType) (*ContainerImage, error) { - img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, label, pullType) - if err != nil { - return nil, err - } - return &ContainerImage{img}, nil -} - -// RemoveImage calls into local storage and removes an image -func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (*image.ImageDeleteResponse, error) { - return r.Runtime.RemoveImage(ctx, img.Image, force) -} - -// PruneImages is wrapper into PruneImages within the image pkg -func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) { - return r.ImageRuntime().PruneImages(ctx, all, filter) -} - -// Export is a wrapper to container export to a tarfile -func (r *LocalRuntime) Export(name string, path string) error { - ctr, err := r.Runtime.LookupContainer(name) - if err != nil { - return errors.Wrapf(err, "error looking up container %q", name) - } - return ctr.Export(path) -} - -// Import is a wrapper to import a container image -func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) { - return r.Runtime.Import(ctx, source, reference, changes, history, quiet) -} - -// CreateVolume is a wrapper to create volumes -func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) { - var ( - options []libpod.VolumeCreateOption - volName string - ) - - if len(c.InputArgs) > 0 { - volName = c.InputArgs[0] - options = append(options, libpod.WithVolumeName(volName)) - } - - if 
c.Flag("driver").Changed { - options = append(options, libpod.WithVolumeDriver(c.Driver)) - } - - if len(labels) != 0 { - options = append(options, libpod.WithVolumeLabels(labels)) - } - - if len(opts) != 0 { - // We need to process -o for uid, gid - parsedOptions, err := shared.ParseVolumeOptions(opts) - if err != nil { - return "", err - } - options = append(options, parsedOptions...) - } - newVolume, err := r.NewVolume(ctx, options...) - if err != nil { - return "", err - } - return newVolume.Name(), nil -} - -// RemoveVolumes is a wrapper to remove volumes -func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, map[string]error, error) { - return shared.SharedRemoveVolumes(ctx, r.Runtime, c.InputArgs, c.All, c.Force) -} - -// Push is a wrapper to push an image to a registry -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { - newImage, err := r.ImageRuntime().NewFromLocal(srcName) - if err != nil { - return err - } - return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil) -} - -// InspectVolumes returns a slice of volumes based on an arg list or --all -func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*libpod.InspectVolumeData, error) { - var ( - volumes []*libpod.Volume - err error - ) - - if c.All { - volumes, err = r.GetAllVolumes() - } else { - for _, v := range c.InputArgs { - vol, err := r.LookupVolume(v) - if err != nil { - return nil, err - } - volumes = append(volumes, vol) - } - } - if err != nil { - return nil, err - } - - inspectVols := make([]*libpod.InspectVolumeData, 0, len(volumes)) - for _, vol := range volumes { - inspectOut, err := vol.Inspect() - if err != nil { - return nil, errors.Wrapf(err, "error inspecting volume %s", vol.Name()) - } - inspectVols = append(inspectVols, inspectOut) - } - - return inspectVols, nil -} - -// Volumes returns a slice of localruntime volumes -func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) { - vols, err := r.GetAllVolumes() - if err != nil { - return nil, err - } - return libpodVolumeToVolume(vols), nil -} - -// libpodVolumeToVolume converts a slice of libpod volumes to a slice -// of localruntime volumes (same as libpod) -func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume { - var vols []*Volume - for _, v := range volumes { - newVol := Volume{ - v, - } - vols = append(vols, &newVol) - } - return vols -} - -// Build is the wrapper to build images -func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) (string, reference.Canonical, error) { - - authfile := c.Authfile - if len(c.Authfile) == 0 { - authfile = os.Getenv("REGISTRY_AUTH_FILE") - } - - options.SystemContext.AuthFilePath = authfile - - if c.GlobalFlags.Runtime != "" { - options.Runtime = c.GlobalFlags.Runtime - } else { - options.Runtime = r.GetOCIRuntimePath() - } - - if c.Quiet { - options.ReportWriter = ioutil.Discard - } - - if rootless.IsRootless() { - options.Isolation = buildah.IsolationOCIRootless - } - - return r.Runtime.Build(ctx, options, dockerfiles...) 
-} - -// PruneVolumes is a wrapper function for libpod PruneVolumes -func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) { - var ( - vids []string - errs []error - ) - reports, err := r.Runtime.PruneVolumes(ctx) - if err != nil { - errs = append(errs, err) - return vids, errs - } - for k, v := range reports { - if v == nil { - vids = append(vids, k) - } else { - errs = append(errs, v) - } - } - return vids, errs -} - -// SaveImage is a wrapper function for saving an image to the local filesystem -func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error { - source := c.InputArgs[0] - additionalTags := c.InputArgs[1:] - - newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source) - if err != nil { - return err - } - return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress) -} - -// LoadImage is a wrapper function for libpod LoadImage -func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { - var ( - writer io.Writer - ) - if !cli.Quiet { - writer = os.Stderr - } - return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy) -} - -// IsImageNotFound checks if the error indicates that no image was found. -func IsImageNotFound(err error) bool { - return errors.Cause(err) == image.ErrNoSuchImage -} - -// HealthCheck is a wrapper to same named function in libpod -func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) { - output := "unhealthy" - status, err := r.Runtime.HealthCheck(c.InputArgs[0]) - if status == libpod.HealthCheckSuccess { - output = "healthy" - } - return output, err -} - -// Events is a wrapper to libpod to obtain libpod/podman events -func (r *LocalRuntime) Events(c *cliconfig.EventValues) error { - var ( - fromStart bool - eventsError error - ) - var tmpl *template.Template - if c.Format != formats.JSONString { - template, err := template.New("events").Parse(c.Format) - if err != nil { - return err - } - tmpl = template - } - if len(c.Since) > 0 || len(c.Until) > 0 { - fromStart = true - } - eventChannel := make(chan *events.Event) - go func() { - readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until} - eventsError = r.Runtime.Events(readOpts) - }() - - if eventsError != nil { - return eventsError - } - w := bufio.NewWriter(os.Stdout) - for event := range eventChannel { - switch { - case c.Format == formats.JSONString: - jsonStr, err := event.ToJSONString() - if err != nil { - return errors.Wrapf(err, "unable to format json") - } - if _, err := w.Write([]byte(jsonStr)); err != nil { - return err - } - case len(c.Format) > 0: - if err := tmpl.Execute(w, event); err != nil { - return err - } - default: - if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil { - return err - } - } - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - if err := w.Flush(); err != nil { - return err - } - } - return nil -} - -// Diff shows the difference in two objects -func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) { - return r.Runtime.GetDiff("", to) -} - -// GenerateKube creates kubernetes email from containers and pods -func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) { - return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime) -} - -// GetPodsByStatus returns a slice of pods filtered by 
a libpod status -func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) { - - filterFunc := func(p *libpod.Pod) bool { - state, _ := shared.GetPodStatus(p) - for _, status := range statuses { - if state == status { - return true - } - } - return false - } - - pods, err := r.Runtime.Pods(filterFunc) - if err != nil { - return nil, err - } - - return pods, nil -} - -// GetVersion is an alias to satisfy interface{} -func (r *LocalRuntime) GetVersion() (define.Version, error) { - return define.GetVersion() -} - -// RemoteEndpoint resolve interface requirement -func (r *LocalRuntime) RemoteEndpoint() (*Endpoint, error) { - return nil, errors.New("RemoteEndpoint() not implemented for local connection") -} diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go deleted file mode 100644 index c511b70f1..000000000 --- a/pkg/adapter/runtime_remote.go +++ /dev/null @@ -1,1108 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "text/template" - "time" - - "github.com/containers/buildah/imagebuildah" - "github.com/containers/buildah/pkg/formats" - "github.com/containers/common/pkg/config" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/remoteclientconfig" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/util" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/utils" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" - v1 "k8s.io/api/core/v1" -) - -// ImageRuntime is wrapper for image runtime -type RemoteImageRuntime struct{} - -// RemoteRuntime describes a wrapper runtime struct -type RemoteRuntime struct { - Conn *varlink.Connection - Remote bool - cmd cliconfig.MainFlags - config io.Reader -} - -// LocalRuntime describes a typical libpod runtime -type LocalRuntime struct { - *RemoteRuntime -} - -// GetRuntimeNoStore returns a LocalRuntime struct with the actual runtime embedded in it -// The nostore is ignored -func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - return GetRuntime(ctx, c) -} - -// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it -func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - var ( - customConfig bool - err error - f *os.File - ) - runtime := RemoteRuntime{ - Remote: true, - cmd: c.GlobalFlags, - } - configPath := remoteclientconfig.GetConfigFilePath() - // Check if the basedir for configPath exists and if not, create it. - if _, err := os.Stat(filepath.Dir(configPath)); os.IsNotExist(err) { - if mkdirErr := os.MkdirAll(filepath.Dir(configPath), 0750); mkdirErr != nil { - return nil, mkdirErr - } - } - if len(c.GlobalFlags.RemoteConfigFilePath) > 0 { - configPath = c.GlobalFlags.RemoteConfigFilePath - customConfig = true - } - - f, err = os.Open(configPath) - if err != nil { - // If user does not explicitly provide a configuration file path and we cannot - // find a default, no error should occur. 
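The surrounding GetRuntime logic treats the remote-client configuration file as optional: a missing default file is silently ignored, while a missing file the user named explicitly is an error. A stdlib-only sketch of that fallback (the function name is illustrative; closing the file is left to the caller, as in the original):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // openOptionalConfig mirrors the fallback in the deleted GetRuntime: a
    // missing file is only an error when the caller explicitly asked for that
    // path; a missing default config simply yields no reader.
    func openOptionalConfig(path string, explicit bool) (io.Reader, error) {
        f, err := os.Open(path)
        if err != nil {
            if os.IsNotExist(err) && !explicit {
                return nil, nil // fall back to built-in defaults
            }
            return nil, fmt.Errorf("unable to load configuration file at %s: %w", path, err)
        }
        return f, nil
    }

    func main() {
        r, err := openOptionalConfig("/nonexistent/podman-remote.conf", false)
        fmt.Println(r == nil, err) // true <nil>
    }
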
- if os.IsNotExist(err) && !customConfig { - logrus.Debugf("unable to load configuration file at %s", configPath) - runtime.config = nil - } else { - return nil, errors.Wrapf(err, "unable to load configuration file at %s", configPath) - } - } else { - // create the io reader for the remote client - runtime.config = bufio.NewReader(f) - } - conn, err := runtime.Connect() - if err != nil { - return nil, err - } - runtime.Conn = conn - return &LocalRuntime{ - &runtime, - }, nil -} - -// DeferredShutdown is a bogus wrapper for compaat with the libpod -// runtime and should only be run when a defer is being used -func (r RemoteRuntime) DeferredShutdown(force bool) { - if err := r.Shutdown(force); err != nil { - logrus.Error("unable to shutdown runtime") - } -} - -// Containers is a bogus wrapper for compat with the libpod runtime -type ContainersConfig struct { - // CGroupManager is the CGroup Manager to use - // Valid values are "cgroupfs" and "systemd" - CgroupManager string -} - -// RuntimeConfig is a bogus wrapper for compat with the libpod runtime -type RuntimeConfig struct { - Containers ContainersConfig -} - -// Shutdown is a bogus wrapper for compat with the libpod runtime -func (r *RemoteRuntime) GetConfig() (*config.Config, error) { - return nil, nil -} - -// Shutdown is a bogus wrapper for compat with the libpod runtime -func (r RemoteRuntime) Shutdown(force bool) error { - return nil -} - -// ContainerImage -type ContainerImage struct { - remoteImage -} - -type remoteImage struct { - ID string - Labels map[string]string - RepoTags []string - RepoDigests []string - Parent string - Size int64 - Created time.Time - InputName string - Names []string - Digest digest.Digest - Digests []digest.Digest - isParent bool - Runtime *LocalRuntime - TopLayer string - ReadOnly bool - NamesHistory []string -} - -// Container ... -type Container struct { - remoteContainer -} - -// remoteContainer .... -type remoteContainer struct { - Runtime *LocalRuntime - config *libpod.ContainerConfig - state *libpod.ContainerState -} - -// Pod ... 
-type Pod struct { - remotepod -} - -type remotepod struct { - config *libpod.PodConfig - state *libpod.PodInspectState - containers []libpod.PodContainerInfo // nolint: structcheck - Runtime *LocalRuntime -} - -type VolumeFilter func(*Volume) bool - -// Volume is embed for libpod volumes -type Volume struct { - remoteVolume -} - -type remoteVolume struct { - Runtime *LocalRuntime - config *libpod.VolumeConfig -} - -// GetImages returns a slice of containerimages over a varlink connection -func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) { - return r.getImages(false) -} - -// GetRWImages returns a slice of read/write containerimages over a varlink connection -func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) { - return r.getImages(true) -} - -func (r *LocalRuntime) GetFilteredImages(filters []string, rwOnly bool) ([]*ContainerImage, error) { - if len(filters) > 0 { - return nil, errors.Wrap(define.ErrNotImplemented, "filtering images is not supported on the remote client") - } - var newImages []*ContainerImage - images, err := iopodman.ListImages().Call(r.Conn) - if err != nil { - return nil, err - } - for _, i := range images { - if rwOnly && i.ReadOnly { - continue - } - name := i.Id - if len(i.RepoTags) > 1 { - name = i.RepoTags[0] - } - newImage, err := imageInListToContainerImage(i, name, r) - if err != nil { - return nil, err - } - newImages = append(newImages, newImage) - } - return newImages, nil -} -func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) { - var newImages []*ContainerImage - images, err := iopodman.ListImages().Call(r.Conn) - if err != nil { - return nil, err - } - for _, i := range images { - if rwOnly && i.ReadOnly { - continue - } - name := i.Id - if len(i.RepoTags) > 1 { - name = i.RepoTags[0] - } - newImage, err := imageInListToContainerImage(i, name, r) - if err != nil { - return nil, err - } - newImages = append(newImages, newImage) - } - return newImages, nil -} - -func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRuntime) (*ContainerImage, error) { - created, err := time.ParseInLocation(time.RFC3339, i.Created, time.UTC) - if err != nil { - return nil, err - } - var digests []digest.Digest - for _, d := range i.Digests { - digests = append(digests, digest.Digest(d)) - } - ri := remoteImage{ - InputName: name, - ID: i.Id, - Digest: digest.Digest(i.Digest), - Digests: digests, - Labels: i.Labels, - RepoTags: i.RepoTags, - RepoDigests: i.RepoTags, - Parent: i.ParentId, - Size: i.Size, - Created: created, - Names: i.RepoTags, - isParent: i.IsParent, - Runtime: runtime, - TopLayer: i.TopLayer, - ReadOnly: i.ReadOnly, - NamesHistory: i.History, - } - return &ContainerImage{ri}, nil -} - -// NewImageFromLocal returns a container image representation of a image over varlink -func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) { - img, err := iopodman.GetImage().Call(r.Conn, name) - if err != nil { - return nil, err - } - return imageInListToContainerImage(img, name, r) - -} - -// LoadFromArchiveReference creates an image from a local archive -func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) { - var iid string - creds := iopodman.AuthConfig{} - reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, srcRef.DockerReference().String(), creds) - if err != nil { - return nil, err - } - - for { - responses, flags, err := reply() - if err != nil { 
- return nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - - newImage, err := r.NewImageFromLocal(iid) - if err != nil { - return nil, err - } - return []*ContainerImage{newImage}, nil -} - -// New calls into local storage to look for an image in local storage or to pull it -func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, label *string, pullType util.PullType) (*ContainerImage, error) { - var iid string - if label != nil { - return nil, errors.New("the remote client function does not support checking a remote image for a label") - } - creds := iopodman.AuthConfig{} - if dockeroptions.DockerRegistryCreds != nil { - creds.Username = dockeroptions.DockerRegistryCreds.Username - creds.Password = dockeroptions.DockerRegistryCreds.Password - } - reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, name, creds) - if err != nil { - return nil, err - } - for { - responses, flags, err := reply() - if err != nil { - return nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - newImage, err := r.NewImageFromLocal(iid) - if err != nil { - return nil, err - } - return newImage, nil -} - -func (r *LocalRuntime) ImageTree(imageOrID string, whatRequires bool) (string, error) { - return iopodman.ImageTree().Call(r.Conn, imageOrID, whatRequires) -} - -// IsParent goes through the layers in the store and checks if i.TopLayer is -// the parent of any other layer in store. Double check that image with that -// layer exists as well. -func (ci *ContainerImage) IsParent(context.Context) (bool, error) { - return ci.remoteImage.isParent, nil -} - -// ID returns the image ID as a string -func (ci *ContainerImage) ID() string { - return ci.remoteImage.ID -} - -// Names returns a string array of names associated with the image -func (ci *ContainerImage) Names() []string { - return ci.remoteImage.Names -} - -// NamesHistory returns a string array of names previously associated with the image -func (ci *ContainerImage) NamesHistory() []string { - return ci.remoteImage.NamesHistory -} - -// Created returns the time the image was created -func (ci *ContainerImage) Created() time.Time { - return ci.remoteImage.Created -} - -// IsReadOnly returns whether the image is ReadOnly -func (ci *ContainerImage) IsReadOnly() bool { - return ci.remoteImage.ReadOnly -} - -// Size returns the size of the image -func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) { - usize := uint64(ci.remoteImage.Size) - return &usize, nil -} - -// Digest returns the image's digest -func (ci *ContainerImage) Digest() digest.Digest { - return ci.remoteImage.Digest -} - -// Digests returns the image's digests -func (ci *ContainerImage) Digests() []digest.Digest { - return append([]digest.Digest{}, ci.remoteImage.Digests...) -} - -// Labels returns a map of the image's labels -func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) { - return ci.remoteImage.Labels, nil -} - -// Dangling returns a bool if the image is "dangling" -func (ci *ContainerImage) Dangling() bool { - return len(ci.Names()) == 0 -} - -// TopLayer returns an images top layer as a string -func (ci *ContainerImage) TopLayer() string { - return ci.remoteImage.TopLayer -} - -// TagImage ... 
-func (ci *ContainerImage) TagImage(tag string) error { - _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag) - return err -} - -// UntagImage removes a single tag from an image -func (ci *ContainerImage) UntagImage(tag string) error { - _, err := iopodman.UntagImage().Call(ci.Runtime.Conn, ci.ID(), tag) - return err -} - -// RemoveImage calls varlink to remove an image -func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (*image.ImageDeleteResponse, error) { - ir := image.ImageDeleteResponse{} - response, err := iopodman.RemoveImageWithResponse().Call(r.Conn, img.InputName, force) - if err != nil { - return nil, err - } - ir.Deleted = response.Deleted - ir.Untagged = append(ir.Untagged, response.Untagged...) - return &ir, nil -} - -// History returns the history of an image and its layers -func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) { - var imageHistories []*image.History - - reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName) - if err != nil { - return nil, err - } - for _, h := range reply { - created, err := time.ParseInLocation(time.RFC3339, h.Created, time.UTC) - if err != nil { - return nil, err - } - ih := image.History{ - ID: h.Id, - Created: &created, - CreatedBy: h.CreatedBy, - Size: h.Size, - Comment: h.Comment, - } - imageHistories = append(imageHistories, &ih) - } - return imageHistories, nil -} - -// PruneImages is the wrapper call for a remote-client to prune images -func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) { - return iopodman.ImagesPrune().Call(r.Conn, all, filter) -} - -// Export is a wrapper to container export to a tarfile -func (r *LocalRuntime) Export(name string, path string) error { - tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "") - if err != nil { - return err - } - return r.GetFileFromRemoteHost(tempPath, path, true) -} - -func (r *LocalRuntime) GetFileFromRemoteHost(remoteFilePath, outputPath string, delete bool) error { - outputFile, err := os.Create(outputPath) - if err != nil { - return err - } - defer outputFile.Close() - - writer := bufio.NewWriter(outputFile) - defer writer.Flush() - - reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, remoteFilePath, delete) - if err != nil { - return err - } - - length, _, err := reply() - if err != nil { - return errors.Wrap(err, "unable to get file length for transfer") - } - - reader := r.Conn.Reader - if _, err := io.CopyN(writer, reader, length); err != nil { - return errors.Wrap(err, "file transfer failed") - } - return nil -} - -// Import implements the remote calls required to import a container image to the store -func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) { - // First we send the file to the host - tempFile, err := r.SendFileOverVarlink(source) - if err != nil { - return "", err - } - return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true) -} - -func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) (string, reference.Canonical, error) { - buildOptions := iopodman.BuildOptions{ - AddHosts: options.CommonBuildOpts.AddHost, - CgroupParent: options.CommonBuildOpts.CgroupParent, - CpuPeriod: int64(options.CommonBuildOpts.CPUPeriod), - CpuQuota: options.CommonBuildOpts.CPUQuota, - 
CpuShares: int64(options.CommonBuildOpts.CPUShares), - CpusetCpus: options.CommonBuildOpts.CPUSetMems, - CpusetMems: options.CommonBuildOpts.CPUSetMems, - Memory: options.CommonBuildOpts.Memory, - MemorySwap: options.CommonBuildOpts.MemorySwap, - ShmSize: options.CommonBuildOpts.ShmSize, - Ulimit: options.CommonBuildOpts.Ulimit, - Volume: options.CommonBuildOpts.Volumes, - } - buildinfo := iopodman.BuildInfo{ - // Err: string(options.Err), - // Out: - // ReportWriter: - Architecture: options.Architecture, - AddCapabilities: options.AddCapabilities, - AdditionalTags: options.AdditionalTags, - Annotations: options.Annotations, - BuildArgs: options.Args, - BuildOptions: buildOptions, - CniConfigDir: options.CNIConfigDir, - CniPluginDir: options.CNIPluginPath, - Compression: string(options.Compression), - Devices: options.Devices, - DefaultsMountFilePath: options.DefaultMountsFilePath, - Dockerfiles: dockerfiles, - DropCapabilities: options.DropCapabilities, - ForceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, - Iidfile: options.IIDFile, - Label: options.Labels, - Layers: options.Layers, - // NamespaceOptions: options.NamespaceOptions, - Nocache: options.NoCache, - Os: options.OS, - Output: options.Output, - OutputFormat: options.OutputFormat, - PullPolicy: options.PullPolicy.String(), - Quiet: options.Quiet, - RemoteIntermediateCtrs: options.RemoveIntermediateCtrs, - RuntimeArgs: options.RuntimeArgs, - SignBy: options.SignBy, - Squash: options.Squash, - Target: options.Target, - TransientMounts: options.TransientMounts, - } - // tar the file - outputFile, err := ioutil.TempFile("", "varlink_tar_send") - if err != nil { - return "", nil, err - } - defer outputFile.Close() - defer os.Remove(outputFile.Name()) - - // Create the tarball of the context dir to a tempfile - if err := utils.TarToFilesystem(options.ContextDirectory, outputFile); err != nil { - return "", nil, err - } - // Send the context dir tarball over varlink. 
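The Build path above packs the context directory into a tarball in a temp file before shipping it over varlink. A simplified, stdlib-only sketch of that packing step; it ignores symlinks, ownership, and the other metadata the real TarToFilesystem helper preserves:

    package main

    import (
        "archive/tar"
        "fmt"
        "io"
        "os"
        "path/filepath"
    )

    // tarContextToTempFile is a stand-in for the temp-file plus TarToFilesystem
    // step above: it walks contextDir, writes the regular files into a tarball,
    // and returns the tarball's path.
    func tarContextToTempFile(contextDir string) (string, error) {
        out, err := os.CreateTemp("", "build_context_*.tar")
        if err != nil {
            return "", err
        }
        defer out.Close()

        tw := tar.NewWriter(out)
        defer tw.Close()

        err = filepath.Walk(contextDir, func(path string, info os.FileInfo, err error) error {
            if err != nil || !info.Mode().IsRegular() {
                return err
            }
            rel, err := filepath.Rel(contextDir, path)
            if err != nil {
                return err
            }
            hdr, err := tar.FileInfoHeader(info, "")
            if err != nil {
                return err
            }
            hdr.Name = filepath.ToSlash(rel)
            if err := tw.WriteHeader(hdr); err != nil {
                return err
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(tw, f)
            return err
        })
        return out.Name(), err
    }

    func main() {
        name, err := tarContextToTempFile(".")
        fmt.Println(name, err)
    }
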
- tempFile, err := r.SendFileOverVarlink(outputFile.Name()) - if err != nil { - return "", nil, err - } - buildinfo.ContextDir = tempFile - - reply, err := iopodman.BuildImage().Send(r.Conn, varlink.More, buildinfo) - if err != nil { - return "", nil, err - } - - for { - responses, flags, err := reply() - if err != nil { - return "", nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - } - return "", nil, err -} - -// SendFileOverVarlink sends a file over varlink in an upgraded connection -func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) { - fs, err := os.Open(source) - if err != nil { - return "", err - } - - fileInfo, err := fs.Stat() - if err != nil { - return "", err - } - logrus.Debugf("sending %s over varlink connection", source) - reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", fileInfo.Size()) - if err != nil { - return "", err - } - _, _, err = reply() - if err != nil { - return "", err - } - - reader := bufio.NewReader(fs) - _, err = reader.WriteTo(r.Conn.Writer) - if err != nil { - return "", err - } - logrus.Debugf("file transfer complete for %s", source) - r.Conn.Writer.Flush() - - // All was sent, wait for the ACK from the server - tempFile, err := r.Conn.Reader.ReadString(':') - if err != nil { - return "", err - } - - // r.Conn is kaput at this point due to the upgrade - if err := r.RemoteRuntime.RefreshConnection(); err != nil { - return "", err - - } - - return strings.Replace(tempFile, ":", "", -1), nil -} - -// GetAllVolumes retrieves all the volumes -func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) { - return nil, define.ErrNotImplemented -} - -// RemoveVolume removes a volumes -func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error { - return define.ErrNotImplemented -} - -// GetContainers retrieves all containers from the state -// Filters can be provided which will determine what containers are included in -// the output. 
Multiple filters are handled by ANDing their output, so only -// containers matching all filters are returned -func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) { - return nil, define.ErrNotImplemented -} - -// RemoveContainer removes the given container -// If force is specified, the container will be stopped first -// Otherwise, RemoveContainer will return an error if the container is running -func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error { - return define.ErrNotImplemented -} - -// CreateVolume creates a volume over a varlink connection for the remote client -func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) { - cvOpts := iopodman.VolumeCreateOpts{ - Options: opts, - Labels: labels, - } - if len(c.InputArgs) > 0 { - cvOpts.VolumeName = c.InputArgs[0] - } - - if c.Flag("driver").Changed { - cvOpts.Driver = c.Driver - } - - return iopodman.VolumeCreate().Call(r.Conn, cvOpts) -} - -// RemoveVolumes removes volumes over a varlink connection for the remote client -func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, map[string]error, error) { - rmOpts := iopodman.VolumeRemoveOpts{ - All: c.All, - Force: c.Force, - Volumes: c.InputArgs, - } - success, failures, err := iopodman.VolumeRemove().Call(r.Conn, rmOpts) - stringsToErrors := make(map[string]error) - for k, v := range failures { - stringsToErrors[k] = errors.New(v) - } - return success, stringsToErrors, err -} - -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { - - reply, err := iopodman.PushImage().Send(r.Conn, varlink.More, srcName, destination, forceCompress, manifestMIMEType, signingOptions.RemoveSignatures, signingOptions.SignBy) - if err != nil { - return err - } - for { - responses, flags, err := reply() - if err != nil { - return err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - } - - return err -} - -// InspectVolumes returns a slice of volumes based on an arg list or --all -func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*libpod.InspectVolumeData, error) { - var ( - inspectData []*libpod.InspectVolumeData - volumes []string - ) - - if c.All { - allVolumes, err := r.Volumes(ctx) - if err != nil { - return nil, err - } - for _, vol := range allVolumes { - volumes = append(volumes, vol.Name()) - } - } else { - volumes = append(volumes, c.InputArgs...) 
- } - - for _, vol := range volumes { - jsonString, err := iopodman.InspectVolume().Call(r.Conn, vol) - if err != nil { - return nil, err - } - inspectJSON := new(libpod.InspectVolumeData) - if err := json.Unmarshal([]byte(jsonString), inspectJSON); err != nil { - return nil, errors.Wrapf(err, "error unmarshalling inspect JSON for volume %s", vol) - } - inspectData = append(inspectData, inspectJSON) - } - - return inspectData, nil -} - -// Volumes returns a slice of adapter.volumes based on information about libpod -// volumes over a varlink connection -func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) { - reply, err := iopodman.GetVolumes().Call(r.Conn, []string{}, true) - if err != nil { - return nil, err - } - return varlinkVolumeToVolume(r, reply), nil -} - -func varlinkVolumeToVolume(r *LocalRuntime, volumes []iopodman.Volume) []*Volume { - var vols []*Volume - for _, v := range volumes { - volumeConfig := libpod.VolumeConfig{ - Name: v.Name, - Labels: v.Labels, - MountPoint: v.MountPoint, - Driver: v.Driver, - Options: v.Options, - } - n := remoteVolume{ - Runtime: r, - config: &volumeConfig, - } - newVol := Volume{ - n, - } - vols = append(vols, &newVol) - } - return vols -} - -// PruneVolumes removes all unused volumes from the remote system -func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) { - var errs []error - prunedNames, prunedErrors, err := iopodman.VolumesPrune().Call(r.Conn) - if err != nil { - return []string{}, []error{err} - } - // We need to transform the string results of the error into actual error types - for _, e := range prunedErrors { - errs = append(errs, errors.New(e)) - } - return prunedNames, errs -} - -// SaveImage is a wrapper function for saving an image to the local filesystem -func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error { - source := c.InputArgs[0] - additionalTags := c.InputArgs[1:] - - options := iopodman.ImageSaveOptions{ - Name: source, - Format: c.Format, - Output: c.Output, - MoreTags: additionalTags, - Quiet: c.Quiet, - Compress: c.Compress, - } - reply, err := iopodman.ImageSave().Send(r.Conn, varlink.More, options) - if err != nil { - return err - } - - var fetchfile string - for { - responses, flags, err := reply() - if err != nil { - return err - } - if len(responses.Id) > 0 { - fetchfile = responses.Id - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - - } - if err != nil { // nolint: govet - return err - } - - outputToDir := false - outfile := c.Output - var outputFile *os.File - // If the result is supposed to be a dir, then we need to put the tarfile - // from the host in a temporary file - if options.Format != "oci-archive" && options.Format != "docker-archive" { - outputToDir = true - outputFile, err = ioutil.TempFile("", "saveimage_tempfile") - if err != nil { - return err - } - outfile = outputFile.Name() - defer outputFile.Close() - defer os.Remove(outputFile.Name()) - } - // We now need to fetch the tarball result back to the more system - if err := r.GetFileFromRemoteHost(fetchfile, outfile, true); err != nil { - return err - } - - // If the result is a tarball, we're done - // If it is a dir, we need to untar the temporary file into the dir - if outputToDir { - if err := utils.UntarToFileSystem(c.Output, outputFile, &archive.TarOptions{}); err != nil { - return err - } - } - return nil -} - -// LoadImage loads a container image from a remote client's filesystem -func (r 
*LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { - var names string - remoteTempFile, err := r.SendFileOverVarlink(cli.Input) - if err != nil { - return "", nil - } - more := varlink.More - if cli.Quiet { - more = 0 - } - reply, err := iopodman.LoadImage().Send(r.Conn, uint64(more), name, remoteTempFile, cli.Quiet, true) - if err != nil { - return "", err - } - - for { - responses, flags, err := reply() - if err != nil { - logrus.Error(err) - return "", err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - names = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - return names, nil -} - -// IsImageNotFound checks if the error indicates that no image was found. -func IsImageNotFound(err error) bool { - if errors.Cause(err) == image.ErrNoSuchImage { - return true - } - switch err.(type) { // nolint: gocritic - case *iopodman.ImageNotFound: - return true - } - return false -} - -// HealthCheck executes a container's healthcheck over a varlink connection -func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) { - return iopodman.HealthCheckRun().Call(r.Conn, c.InputArgs[0]) -} - -// Events monitors libpod/podman events over a varlink connection -func (r *LocalRuntime) Events(c *cliconfig.EventValues) error { - var more uint64 - if c.Stream { - more = uint64(varlink.More) - } - reply, err := iopodman.GetEvents().Send(r.Conn, more, c.Filter, c.Since, c.Until) - if err != nil { - return errors.Wrapf(err, "unable to obtain events") - } - - w := bufio.NewWriter(os.Stdout) - var tmpl *template.Template - if c.Format != formats.JSONString { - template, err := template.New("events").Parse(c.Format) - if err != nil { - return err - } - tmpl = template - } - - for { - returnedEvent, flags, err := reply() - if err != nil { - // When the error handling is back into podman, we can flip this to a better way to check - // for problems. For now, this works. - return err - } - if returnedEvent.Time == "" && returnedEvent.Status == "" && returnedEvent.Type == "" { - // We got a blank event return, signals end of stream in certain cases - break - } - eTime, err := time.Parse(time.RFC3339Nano, returnedEvent.Time) - if err != nil { - return errors.Wrapf(err, "unable to parse time of event %s", returnedEvent.Time) - } - eType, err := events.StringToType(returnedEvent.Type) - if err != nil { - return err - } - eStatus, err := events.StringToStatus(returnedEvent.Status) - if err != nil { - return err - } - event := events.Event{ - ID: returnedEvent.Id, - Image: returnedEvent.Image, - Name: returnedEvent.Name, - Status: eStatus, - Time: eTime, - Type: eType, - } - if c.Format == formats.JSONString { // nolint: gocritic - jsonStr, err := event.ToJSONString() - if err != nil { - return errors.Wrapf(err, "unable to format json") - } - if _, err := w.Write([]byte(jsonStr)); err != nil { - return err - } - } else if len(c.Format) > 0 { - if err := tmpl.Execute(w, event); err != nil { - return err - } - } else { - if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil { - return err - } - } - - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - if err := w.Flush(); err != nil { - return err - } - if flags&varlink.Continues == 0 { - break - } - } - return nil -} - -// Diff ... 
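The Events loop above either prints each event as JSON or renders it through the Go template supplied as --format. A self-contained sketch of that template path, using a stand-in struct instead of libpod's events.Event (the field values here are made up):

package main

import (
	"os"
	"text/template"
	"time"
)

// event mirrors the fields the deleted Events loop copies out of each varlink reply.
type event struct {
	ID     string
	Image  string
	Name   string
	Status string
	Type   string
	Time   time.Time
}

func main() {
	// Stand-in for a user-supplied --format string.
	tmpl := template.Must(template.New("events").Parse("{{.Time}} {{.Type}} {{.Status}} {{.Name}}\n"))

	ev := event{
		ID:     "abc123",
		Image:  "quay.io/example/image:latest",
		Name:   "demo",
		Status: "start",
		Type:   "container",
		Time:   time.Now(),
	}
	if err := tmpl.Execute(os.Stdout, ev); err != nil {
		panic(err)
	}
}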
-func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) { - var changes []archive.Change - reply, err := iopodman.Diff().Call(r.Conn, to) - if err != nil { - return nil, err - } - for _, change := range reply { - changes = append(changes, archive.Change{Path: change.Path, Kind: stringToChangeType(change.ChangeType)}) - } - return changes, nil -} - -func stringToChangeType(change string) archive.ChangeType { - switch change { - case "A": - return archive.ChangeAdd - case "D": - return archive.ChangeDelete - default: // nolint: gocritic,stylecheck - logrus.Errorf("'%s' is unknown archive type", change) - fallthrough - case "C": - return archive.ChangeModify - } -} - -// GenerateKube creates kubernetes email from containers and pods -func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) { - var ( - pod v1.Pod - service v1.Service - ) - reply, err := iopodman.GenerateKube().Call(r.Conn, c.InputArgs[0], c.Service) - if err != nil { - return nil, nil, errors.Wrap(err, "unable to create kubernetes YAML") - } - if err := json.Unmarshal([]byte(reply.Pod), &pod); err != nil { - return nil, nil, err - } - err = json.Unmarshal([]byte(reply.Service), &service) - return &pod, &service, err -} - -// GetContainersByContext looks up containers based on the cli input of all, latest, or a list -func (r *LocalRuntime) GetContainersByContext(all bool, latest bool, namesOrIDs []string) ([]*Container, error) { - var containers []*Container - cids, err := iopodman.GetContainersByContext().Call(r.Conn, all, latest, namesOrIDs) - if err != nil { - return nil, err - } - for _, cid := range cids { - ctr, err := r.LookupContainer(cid) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - return containers, nil -} - -// GetVersion returns version information from service -func (r *LocalRuntime) GetVersion() (define.Version, error) { - version, goVersion, gitCommit, built, osArch, apiVersion, err := iopodman.GetVersion().Call(r.Conn) - if err != nil { - return define.Version{}, errors.Wrapf(err, "Unable to obtain server version information") - } - - var buildTime int64 - if built != "" { - t, err := time.Parse(time.RFC3339, built) - if err != nil { - return define.Version{}, nil - } - buildTime = t.Unix() - } - - return define.Version{ - RemoteAPIVersion: apiVersion, - Version: version, - GoVersion: goVersion, - GitCommit: gitCommit, - Built: buildTime, - OsArch: osArch, - }, nil -} diff --git a/pkg/adapter/runtime_remote_supported.go b/pkg/adapter/runtime_remote_supported.go deleted file mode 100644 index b8e8da308..000000000 --- a/pkg/adapter/runtime_remote_supported.go +++ /dev/null @@ -1 +0,0 @@ -package adapter diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/adapter/shortcuts/shortcuts.go deleted file mode 100644 index 8a8459c6c..000000000 --- a/pkg/adapter/shortcuts/shortcuts.go +++ /dev/null @@ -1,66 +0,0 @@ -package shortcuts - -import ( - "github.com/containers/libpod/libpod" - "github.com/sirupsen/logrus" -) - -// GetPodsByContext returns a slice of pods. Note that all, latest and pods are -// mutually exclusive arguments. 
-func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) { - var outpods []*libpod.Pod - if all { - return runtime.GetAllPods() - } - if latest { - p, err := runtime.GetLatestPod() - if err != nil { - return nil, err - } - outpods = append(outpods, p) - return outpods, nil - } - var err error - for _, p := range pods { - pod, e := runtime.LookupPod(p) - if e != nil { - // Log all errors here, so callers don't need to. - logrus.Debugf("Error looking up pod %q: %v", p, e) - if err == nil { - err = e - } - } else { - outpods = append(outpods, pod) - } - } - return outpods, err -} - -// GetContainersByContext gets pods whether all, latest, or a slice of names/ids -// is specified. -func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) { - var ctr *libpod.Container - ctrs = []*libpod.Container{} - - switch { - case all: - ctrs, err = runtime.GetAllContainers() - case latest: - ctr, err = runtime.GetLatestContainer() - ctrs = append(ctrs, ctr) - default: - for _, n := range names { - ctr, e := runtime.LookupContainer(n) - if e != nil { - // Log all errors here, so callers don't need to. - logrus.Debugf("Error looking up container %q: %v", n, e) - if err == nil { - err = e - } - } else { - ctrs = append(ctrs, ctr) - } - } - } - return -} diff --git a/pkg/adapter/sigproxy_linux.go b/pkg/adapter/sigproxy_linux.go deleted file mode 100644 index 5695d0e42..000000000 --- a/pkg/adapter/sigproxy_linux.go +++ /dev/null @@ -1,45 +0,0 @@ -package adapter - -import ( - "os" - "syscall" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/pkg/signal" - "github.com/sirupsen/logrus" -) - -// ProxySignals ... -func ProxySignals(ctr *libpod.Container) { - sigBuffer := make(chan os.Signal, 128) - signal.CatchAll(sigBuffer) - - logrus.Debugf("Enabling signal proxying") - - go func() { - for s := range sigBuffer { - // Ignore SIGCHLD and SIGPIPE - these are mostly likely - // intended for the podman command itself. - // SIGURG was added because of golang 1.14 and its preemptive changes - // causing more signals to "show up". - // https://github.com/containers/libpod/issues/5483 - if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG { - continue - } - - if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil { - // If the container dies, and we find out here, - // we need to forward that one signal to - // ourselves so that it is not lost, and then - // we terminate the proxy and let the defaults - // play out. - logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err) - signal.StopCatch(sigBuffer) - if err := syscall.Kill(syscall.Getpid(), s.(syscall.Signal)); err != nil { - logrus.Errorf("failed to kill pid %d", syscall.Getpid()) - } - return - } - } - }() -} diff --git a/pkg/adapter/terminal.go b/pkg/adapter/terminal.go deleted file mode 100644 index 499e77def..000000000 --- a/pkg/adapter/terminal.go +++ /dev/null @@ -1,101 +0,0 @@ -package adapter - -import ( - "context" - "os" - "os/signal" - - lsignal "github.com/containers/libpod/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "k8s.io/client-go/tools/remotecommand" -) - -// RawTtyFormatter ... -type RawTtyFormatter struct { -} - -// getResize returns a TerminalSize command matching stdin's current -// size on success, and nil on errors. 
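getResize below reads the window size with docker's term.GetWinsize, and resizeTty re-queries it on every SIGWINCH. The same pattern, reduced to the standard library plus golang.org/x/term (which the deleted code does not actually import), looks roughly like this:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"golang.org/x/term"
)

func main() {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGWINCH)

	// Report the size once up front, then again after every SIGWINCH,
	// mirroring the resizeTty goroutine in the deleted terminal.go.
	for {
		w, h, err := term.GetSize(int(os.Stdin.Fd()))
		if err != nil {
			fmt.Fprintln(os.Stderr, "could not get terminal size:", err)
			return
		}
		fmt.Printf("terminal is %dx%d\n", w, h)
		<-sigchan
	}
}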
-func getResize() *remotecommand.TerminalSize { - winsize, err := term.GetWinsize(os.Stdin.Fd()) - if err != nil { - logrus.Warnf("Could not get terminal size %v", err) - return nil - } - return &remotecommand.TerminalSize{ - Width: winsize.Width, - Height: winsize.Height, - } -} - -// Helper for prepareAttach - set up a goroutine to generate terminal resize events -func resizeTty(ctx context.Context, resize chan remotecommand.TerminalSize) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, lsignal.SIGWINCH) - go func() { - defer close(resize) - // Update the terminal size immediately without waiting - // for a SIGWINCH to get the correct initial size. - resizeEvent := getResize() - for { - if resizeEvent == nil { - select { - case <-ctx.Done(): - return - case <-sigchan: - resizeEvent = getResize() - } - } else { - select { - case <-ctx.Done(): - return - case <-sigchan: - resizeEvent = getResize() - case resize <- *resizeEvent: - resizeEvent = nil - } - } - } - }() -} - -func restoreTerminal(state *term.State) error { - logrus.SetFormatter(&logrus.TextFormatter{}) - return term.RestoreTerminal(os.Stdin.Fd(), state) -} - -// Format ... -func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) { - textFormatter := logrus.TextFormatter{} - bytes, err := textFormatter.Format(entry) - - if err == nil { - bytes = append(bytes, '\r') - } - - return bytes, err -} - -func handleTerminalAttach(ctx context.Context, resize chan remotecommand.TerminalSize) (context.CancelFunc, *term.State, error) { - logrus.Debugf("Handling terminal attach") - - subCtx, cancel := context.WithCancel(ctx) - - resizeTty(subCtx, resize) - - oldTermState, err := term.SaveState(os.Stdin.Fd()) - if err != nil { - // allow caller to not have to do any cleaning up if we error here - cancel() - return nil, nil, errors.Wrapf(err, "unable to save terminal state") - } - - logrus.SetFormatter(&RawTtyFormatter{}) - if _, err := term.SetRawTerminal(os.Stdin.Fd()); err != nil { - return cancel, nil, err - } - - return cancel, oldTermState, nil -} diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go deleted file mode 100644 index a56704be6..000000000 --- a/pkg/adapter/terminal_linux.go +++ /dev/null @@ -1,121 +0,0 @@ -package adapter - -import ( - "bufio" - "context" - "fmt" - "os" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" - "k8s.io/client-go/tools/remotecommand" -) - -// ExecAttachCtr execs and attaches to a container -func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { - resize := make(chan remotecommand.TerminalSize) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. 
If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && tty { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return -1, err - } - defer cancel() - defer func() { - if err := restoreTerminal(oldTermState); err != nil { - logrus.Errorf("unable to restore terminal: %q", err) - } - }() - } - - execConfig := new(libpod.ExecConfig) - execConfig.Command = cmd - execConfig.Terminal = tty - execConfig.Privileged = privileged - execConfig.Environment = env - execConfig.User = user - execConfig.WorkDir = workDir - execConfig.DetachKeys = &detachKeys - execConfig.PreserveFDs = preserveFDs - - return ctr.Exec(execConfig, streams, resize) -} - -// StartAttachCtr starts and (if required) attaches to a container -// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream -// error. we may need to just lint disable this one. -func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer - resize := make(chan remotecommand.TerminalSize) - - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && ctr.Spec().Process.Terminal { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return err - } - defer func() { - if err := restoreTerminal(oldTermState); err != nil { - logrus.Errorf("unable to restore terminal: %q", err) - } - }() - defer cancel() - } - - streams := new(define.AttachStreams) - streams.OutputStream = stdout - streams.ErrorStream = stderr - streams.InputStream = bufio.NewReader(stdin) - streams.AttachOutput = true - streams.AttachError = true - streams.AttachInput = true - - if stdout == nil { - logrus.Debugf("Not attaching to stdout") - streams.AttachOutput = false - } - if stderr == nil { - logrus.Debugf("Not attaching to stderr") - streams.AttachError = false - } - if stdin == nil { - logrus.Debugf("Not attaching to stdin") - streams.AttachInput = false - } - - if !startContainer { - if sigProxy { - ProxySignals(ctr) - } - - return ctr.Attach(streams, detachKeys, resize) - } - - attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive) - if err != nil { - return err - } - - if sigProxy { - ProxySignals(ctr) - } - - if stdout == nil && stderr == nil { - fmt.Printf("%s\n", ctr.ID()) - } - - err = <-attachChan - if err != nil { - return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) - } - - return nil -} diff --git a/pkg/adapter/terminal_unsupported.go b/pkg/adapter/terminal_unsupported.go deleted file mode 100644 index 9067757a1..000000000 --- a/pkg/adapter/terminal_unsupported.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !linux - -package adapter - -import ( - "context" - "os" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" -) - -// ExecAttachCtr execs and attaches to a container -func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { - return -1, define.ErrNotImplemented -} - -// StartAttachCtr starts and (if required) attaches to a container -// if you change the signature of this function from os.File to io.Writer, it will 
trigger a downstream -error. we may need to just lint disable this one. -func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer - return define.ErrNotImplemented -} diff --git a/pkg/adapter/volumes_remote.go b/pkg/adapter/volumes_remote.go deleted file mode 100644 index 58f9ba625..000000000 --- a/pkg/adapter/volumes_remote.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build remoteclient - -package adapter - -// Name returns the name of the volume -func (v *Volume) Name() string { - return v.config.Name -} - -// Labels returns the labels for a volume -func (v *Volume) Labels() map[string]string { - return v.config.Labels -} - -// Driver returns the driver for the volume -func (v *Volume) Driver() string { - return v.config.Driver -} - -// Options returns the options a volume was created with -func (v *Volume) Options() map[string]string { - return v.config.Options -} - -// MountPoint returns the path the volume is mounted to -func (v *Volume) MountPoint() string { - return v.config.MountPoint -} - -// Scope returns the scope for an adapter.volume -func (v *Volume) Scope() string { - return "local" -}
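Finally, the signal proxy removed in sigproxy_linux.go above boils down to: catch every signal, drop SIGCHLD, SIGPIPE and SIGURG (the last because Go 1.14's async preemption generates it constantly), and forward the rest to the container. Stripped of the libpod types, the loop is roughly this (the Printf stands in for ctr.Kill):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigBuffer := make(chan os.Signal, 128)
	signal.Notify(sigBuffer) // no arguments: relay every catchable signal

	go func() {
		for s := range sigBuffer {
			// SIGCHLD and SIGPIPE are most likely meant for this process itself,
			// and SIGURG shows up routinely since Go 1.14's async preemption.
			if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG {
				continue
			}
			// In the deleted code this is ctr.Kill(uint(s.(syscall.Signal))).
			fmt.Printf("would forward signal %v to the container\n", s)
		}
	}()

	select {} // keep the proxy goroutine alive
}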