| author | Brent Baude <bbaude@redhat.com> | 2020-04-16 12:25:26 -0500 |
|---|---|---|
| committer | Brent Baude <bbaude@redhat.com> | 2020-04-16 15:53:58 -0500 |
| commit | 241326a9a8c20ad7f2bcf651416b836e7778e090 (patch) | |
| tree | 4001e8e47a022bb1b9bfbf2332c42e1aeb802f9e /pkg | |
| parent | 88c6fd06cd54fb9a8826306dfdf1a77e400de5de (diff) | |
Podman V2 birth
Remove podman v1 and replace with podman v2.
Signed-off-by: Brent Baude <bbaude@redhat.com>
Diffstat (limited to 'pkg')
41 files changed, 3089 insertions, 6785 deletions
diff --git a/pkg/adapter/autoupdate.go b/pkg/adapter/autoupdate.go deleted file mode 100644 index 01f7a29c5..000000000 --- a/pkg/adapter/autoupdate.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "github.com/containers/libpod/pkg/autoupdate" -) - -func (r *LocalRuntime) AutoUpdate() ([]string, []error) { - return autoupdate.AutoUpdate(r.Runtime) -} diff --git a/pkg/adapter/autoupdate_remote.go b/pkg/adapter/autoupdate_remote.go deleted file mode 100644 index a2a82d0d4..000000000 --- a/pkg/adapter/autoupdate_remote.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" -) - -func (r *LocalRuntime) AutoUpdate() ([]string, []error) { - return nil, []error{define.ErrNotImplemented} -} diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go deleted file mode 100644 index a1b2bd507..000000000 --- a/pkg/adapter/client.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "fmt" - "os" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" -) - -var remoteEndpoint *Endpoint // nolint: deadcode,unused - -func (r RemoteRuntime) RemoteEndpoint() (remoteEndpoint *Endpoint, err error) { - remoteConfigConnections, err := remoteclientconfig.ReadRemoteConfig(r.config) - if err != nil && errors.Cause(err) != remoteclientconfig.ErrNoConfigationFile { - return nil, err - } - // If the user defines an env variable for podman_varlink_bridge - // we use that as passed. - if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" { - logrus.Debug("creating a varlink bridge based on env variable") - remoteEndpoint, err = newBridgeConnection(bridge, nil, r.cmd.LogLevel) - // if an environment variable for podman_varlink_address is defined, - // we used that as passed - } else if address := os.Getenv("PODMAN_VARLINK_ADDRESS"); address != "" { // nolint:gocritic - logrus.Debugf("creating a varlink address based on env variable: %s", address) - remoteEndpoint, err = newSocketConnection(address) - // if the user provides a remote host, we use it to configure a bridge connection - } else if len(r.cmd.RemoteHost) > 0 { - logrus.Debug("creating a varlink bridge based on user input") - if len(r.cmd.RemoteUserName) < 1 { - return nil, errors.New("you must provide a username when providing a remote host name") - } - rc := remoteclientconfig.RemoteConnection{r.cmd.RemoteHost, r.cmd.RemoteUserName, false, r.cmd.Port, r.cmd.IdentityFile, r.cmd.IgnoreHosts} // nolint: govet - remoteEndpoint, err = newBridgeConnection("", &rc, r.cmd.LogLevel) - // if the user has a config file with connections in it - } else if len(remoteConfigConnections.Connections) > 0 { - logrus.Debug("creating a varlink bridge based configuration file") - var rc *remoteclientconfig.RemoteConnection - if len(r.cmd.ConnectionName) > 0 { - rc, err = remoteConfigConnections.GetRemoteConnection(r.cmd.ConnectionName) - } else { - rc, err = remoteConfigConnections.GetDefault() - } - if err != nil { - return nil, err - } - if len(rc.Username) < 1 { - logrus.Debugf("Connection has no username, using current user %q", r.cmd.RemoteUserName) - rc.Username = r.cmd.RemoteUserName - } - remoteEndpoint, err = newBridgeConnection("", rc, r.cmd.LogLevel) - // last resort is to make a socket connection with the default varlink address for root user - } else { - logrus.Debug("creating a varlink address based 
default root address") - remoteEndpoint, err = newSocketConnection(DefaultVarlinkAddress) - } - return // nolint: nakedret -} - -// Connect provides a varlink connection -func (r RemoteRuntime) Connect() (*varlink.Connection, error) { - ep, err := r.RemoteEndpoint() - if err != nil { - return nil, err - } - switch ep.Type { - case DirectConnection: - return varlink.NewConnection(ep.Connection) - case BridgeConnection: - return varlink.NewBridge(ep.Connection) - } - return nil, errors.New(fmt.Sprintf("Unable to determine type of varlink connection: %s", ep.Connection)) -} - -// RefreshConnection is used to replace the current r.Conn after things like -// using an upgraded varlink connection -func (r RemoteRuntime) RefreshConnection() error { - newConn, err := r.Connect() - if err != nil { - return err - } - r.Conn = newConn - return nil -} - -// newSocketConnection returns an endpoint for a uds based connection -func newSocketConnection(address string) (*Endpoint, error) { - endpoint := Endpoint{ - Type: DirectConnection, - Connection: address, - } - return &endpoint, nil -} - -// newBridgeConnection creates a bridge type endpoint with username, destination, and log-level -func newBridgeConnection(formattedBridge string, remoteConn *remoteclientconfig.RemoteConnection, logLevel string) (*Endpoint, error) { - endpoint := Endpoint{ - Type: BridgeConnection, - } - - if len(formattedBridge) < 1 && remoteConn == nil { - return nil, errors.New("bridge connections must either be created by string or remoteconnection") - } - if len(formattedBridge) > 0 { - endpoint.Connection = formattedBridge - return &endpoint, nil - } - endpoint.Connection = formatDefaultBridge(remoteConn, logLevel) - return &endpoint, nil -} diff --git a/pkg/adapter/client_config.go b/pkg/adapter/client_config.go deleted file mode 100644 index 8187b03b1..000000000 --- a/pkg/adapter/client_config.go +++ /dev/null @@ -1,39 +0,0 @@ -package adapter - -// DefaultAPIAddress is the default address of the REST socket -const DefaultAPIAddress = "unix:/run/podman/podman.sock" - -// DefaultVarlinkAddress is the default address of the varlink socket -const DefaultVarlinkAddress = "unix:/run/podman/io.podman" - -// EndpointType declares the type of server connection -type EndpointType int - -// Enum of connection types -const ( - Unknown = iota - 1 // Unknown connection type - BridgeConnection // BridgeConnection proxy connection via ssh - DirectConnection // DirectConnection socket connection to server -) - -// String prints ASCII string for EndpointType -func (e EndpointType) String() string { - // declare an array of strings - // ... 
operator counts how many - // items in the array (7) - names := [...]string{ - "BridgeConnection", - "DirectConnection", - } - - if e < BridgeConnection || e > DirectConnection { - return "Unknown" - } - return names[e] -} - -// Endpoint type and connection string to use -type Endpoint struct { - Type EndpointType - Connection string -} diff --git a/pkg/adapter/client_unix.go b/pkg/adapter/client_unix.go deleted file mode 100644 index 7af8b24c6..000000000 --- a/pkg/adapter/client_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build linux darwin -// +build remoteclient - -package adapter - -import ( - "fmt" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" -) - -func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string { - port := remoteConn.Port - if port == 0 { - port = 22 - } - options := "" - if remoteConn.IdentityFile != "" { - options += " -i " + remoteConn.IdentityFile - } - if remoteConn.IgnoreHosts { - options += " -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - } - return fmt.Sprintf( - `ssh -p %d -T%s %s@%s -- varlink -A \'podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`, - port, options, remoteConn.Username, remoteConn.Destination, logLevel) -} diff --git a/pkg/adapter/client_windows.go b/pkg/adapter/client_windows.go deleted file mode 100644 index 32302a600..000000000 --- a/pkg/adapter/client_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "fmt" - - "github.com/containers/libpod/cmd/podman/remoteclientconfig" -) - -func formatDefaultBridge(remoteConn *remoteclientconfig.RemoteConnection, logLevel string) string { - port := remoteConn.Port - if port == 0 { - port = 22 - } - options := "" - if remoteConn.IdentityFile != "" { - options += " -i " + remoteConn.IdentityFile - } - if remoteConn.IgnoreHosts { - options += " -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - } - return fmt.Sprintf( - `ssh -p %d -T%s %s@%s -- varlink -A 'podman --log-level=%s varlink $VARLINK_ADDRESS' bridge`, - port, options, remoteConn.Username, remoteConn.Destination, logLevel) -} diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go deleted file mode 100644 index ecadbd2f9..000000000 --- a/pkg/adapter/containers.go +++ /dev/null @@ -1,1394 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "bufio" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/containers/buildah" - cfg "github.com/containers/common/pkg/config" - "github.com/containers/image/v5/manifest" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/libpod/logs" - "github.com/containers/libpod/pkg/adapter/shortcuts" - "github.com/containers/libpod/pkg/checkpoint" - envLib "github.com/containers/libpod/pkg/env" - "github.com/containers/libpod/pkg/systemd/generate" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GetLatestContainer gets the latest Container and wraps it in an adapter Container -func (r *LocalRuntime) GetLatestContainer() (*Container, error) { - Container := Container{} - c, err := r.Runtime.GetLatestContainer() - Container.Container = c - return &Container, err -} - -// 
GetAllContainers gets all Containers and wraps each one in an adapter Container -func (r *LocalRuntime) GetAllContainers() ([]*Container, error) { - var containers []*Container - allContainers, err := r.Runtime.GetAllContainers() - if err != nil { - return nil, err - } - - for _, c := range allContainers { - containers = append(containers, &Container{c}) - } - return containers, nil -} - -// LookupContainer gets a Container by name or id and wraps it in an adapter Container -func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) { - ctr, err := r.Runtime.LookupContainer(idOrName) - if err != nil { - return nil, err - } - return &Container{ctr}, nil -} - -// StopContainers stops container(s) based on CLI inputs. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) { - var timeout *uint - if cli.Flags().Changed("timeout") || cli.Flags().Changed("time") { - t := cli.Timeout - timeout = &t - } - - maxWorkers := shared.DefaultPoolSize("stop") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum stop workers to %d", maxWorkers) - - names := cli.InputArgs - for _, cidFile := range cli.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - names = append(names, id) - } - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { - return nil, nil, err - } - - pool := shared.NewPool("stop", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - if timeout == nil { - t := c.StopTimeout() - timeout = &t - logrus.Debugf("Set timeout to container %s default (%d)", c.ID(), *timeout) - } - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - err := c.StopWithTimeout(*timeout) - if err != nil { - if errors.Cause(err) == define.ErrCtrStopped { - logrus.Debugf("Container %s is already stopped", c.ID()) - return nil - } else if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid { - logrus.Debugf("Container %s is not running, could not stop", c.ID()) - return nil - } - logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// KillContainers sends signal to container(s) based on CLI inputs. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) { - maxWorkers := shared.DefaultPoolSize("kill") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum kill workers to %d", maxWorkers) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return nil, nil, err - } - - pool := shared.NewPool("kill", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - return c.Kill(uint(signal)) - }, - }) - } - return pool.Run() -} - -// InitContainers initializes container(s) based on CLI inputs. 
-// Returns list of successful id(s), map of failed id(s) to errors, or a general -// error not from the container. -func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitValues) ([]string, map[string]error, error) { - maxWorkers := shared.DefaultPoolSize("init") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum init workers to %d", maxWorkers) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return nil, nil, err - } - - pool := shared.NewPool("init", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Init(ctx) - if err != nil { - // If we're initializing all containers, ignore invalid state errors - if cli.All && errors.Cause(err) == define.ErrCtrStateInvalid { - return nil - } - return err - } - return nil - }, - }) - } - return pool.Run() -} - -// RemoveContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - maxWorkers := shared.DefaultPoolSize("rm") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.Storage { - for _, ctr := range cli.InputArgs { - if err := r.RemoveStorageContainer(ctr, cli.Force); err != nil { - failures[ctr] = err - } - ok = append(ok, ctr) - } - return ok, failures, nil - } - - names := cli.InputArgs - for _, cidFile := range cli.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - names = append(names, id) - } - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { - // Failed to get containers. If force is specified, get the containers ID - // and evict them - if !cli.Force { - return ok, failures, err - } - - for _, ctr := range cli.InputArgs { - logrus.Debugf("Evicting container %q", ctr) - id, err := r.EvictContainer(ctx, ctr, cli.Volumes) - if err != nil { - if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr { - logrus.Debugf("Ignoring error (--allow-missing): %v", err) - continue - } - failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id) - continue - } - ok = append(ok, id) - } - return ok, failures, nil - } - - pool := shared.NewPool("rm", maxWorkers, len(ctrs)) - for _, c := range ctrs { - c := c - - pool.Add(shared.Job{ - ID: c.ID(), - Fn: func() error { - err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes) - if err != nil { - if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr { - logrus.Debugf("Ignoring error (--allow-missing): %v", err) - return nil - } - logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// UmountRootFilesystems removes container(s) based on CLI inputs. 
-func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, ctr := range ctrs { - state, err := ctr.State() - if err != nil { - logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error()) - continue - } - if state == define.ContainerStateRunning { - logrus.Debugf("Error umounting container %s, is running", ctr.ID()) - continue - } - - if err := ctr.Unmount(cli.Force); err != nil { - if cli.All && errors.Cause(err) == storage.ErrLayerNotMounted { - logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID()) - continue - } - failures[ctr.ID()] = errors.Wrapf(err, "error unmounting container %s", ctr.ID()) - } else { - ok = append(ok, ctr.ID()) - } - } - return ok, failures, nil -} - -// WaitOnContainers waits for all given container(s) to stop -func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(false, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, c := range ctrs { - if returnCode, err := c.WaitWithInterval(interval); err == nil { - ok = append(ok, strconv.Itoa(int(returnCode))) - } else { - failures[c.ID()] = err - } - } - return ok, failures, err -} - -// Log logs one or more containers -func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error { - - var wg sync.WaitGroup - options.WaitGroup = &wg - if len(c.InputArgs) > 1 { - options.Multi = true - } - tailLen := int(c.Tail) - if tailLen < 0 { - tailLen = 0 - } - numContainers := len(c.InputArgs) - if numContainers == 0 { - numContainers = 1 - } - logChannel := make(chan *logs.LogLine, tailLen*numContainers+1) - containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - return err - } - if err := r.Runtime.Log(containers, options, logChannel); err != nil { - return err - } - go func() { - wg.Wait() - close(logChannel) - }() - for line := range logChannel { - fmt.Println(line.String(options)) - } - return nil -} - -// CreateContainer creates a libpod container -func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, false) - ctr, _, err := shared.CreateContainer(ctx, &results, r.Runtime) - if err != nil { - return "", err - } - return ctr.ID(), nil -} - -// Select the detach keys to use from user input flag, config file, or default value -func (r *LocalRuntime) selectDetachKeys(flagValue string) (string, error) { - if flagValue != "" { - return flagValue, nil - } - - config, err := r.GetConfig() - if err != nil { - return "", errors.Wrapf(err, "unable to retrieve runtime config") - } - if config.Engine.DetachKeys != "" { - return config.Engine.DetachKeys, nil - } - - return cfg.DefaultDetachKeys, nil -} - -// Run a libpod container -func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, false) - - ctr, createConfig, err := shared.CreateContainer(ctx, &results, 
r.Runtime) - if err != nil { - return exitCode, err - } - - if logrus.GetLevel() == logrus.DebugLevel { - cgroupPath, err := ctr.CGroupPath() - if err == nil { - logrus.Debugf("container %q has CgroupParent %q", ctr.ID(), cgroupPath) - } - } - - // Handle detached start - if createConfig.Detach { - // if the container was created as part of a pod, also start its dependencies, if any. - if err := ctr.Start(ctx, c.IsSet("pod")); err != nil { - // This means the command did not exist - return define.ExitCode(err), err - } - - fmt.Printf("%s\n", ctr.ID()) - exitCode = 0 - return exitCode, nil - } - - outputStream := os.Stdout - errorStream := os.Stderr - inputStream := os.Stdin - - // If -i is not set, clear stdin - if !c.Bool("interactive") { - inputStream = nil - } - - // If attach is set, clear stdin/stdout/stderr and only attach requested - if c.IsSet("attach") || c.IsSet("a") { - outputStream = nil - errorStream = nil - if !c.Bool("interactive") { - inputStream = nil - } - - attachTo := c.StringSlice("attach") - for _, stream := range attachTo { - switch strings.ToLower(stream) { - case "stdout": - outputStream = os.Stdout - case "stderr": - errorStream = os.Stderr - case "stdin": - inputStream = os.Stdin - default: - return exitCode, errors.Wrapf(define.ErrInvalidArg, "invalid stream %q for --attach - must be one of stdin, stdout, or stderr", stream) - } - } - } - - keys := c.String("detach-keys") - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return exitCode, err - } - } - - // if the container was created as part of a pod, also start its dependencies, if any. - if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, keys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil { - // We've manually detached from the container - // Do not perform cleanup, or wait for container exit code - // Just exit immediately - if errors.Cause(err) == define.ErrDetach { - return 0, nil - } - if c.IsSet("rm") { - if deleteError := r.Runtime.RemoveContainer(ctx, ctr, true, false); deleteError != nil { - logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID()) - } - } - if errors.Cause(err) == define.ErrWillDeadlock { - logrus.Debugf("Deadlock error: %v", err) - return define.ExitCode(err), errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID()) - } - return define.ExitCode(err), err - } - - if ecode, err := ctr.Wait(); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr { - // Check events - event, err := r.Runtime.GetLastContainerEvent(ctr.ID(), events.Exited) - if err != nil { - logrus.Errorf("Cannot get exit code: %v", err) - exitCode = define.ExecErrorCodeNotFound - } else { - exitCode = event.ContainerExitCode - } - } - } else { - exitCode = int(ecode) - } - - if c.IsSet("rm") { - if err := r.Runtime.RemoveContainer(ctx, ctr, false, true); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr || - errors.Cause(err) == define.ErrCtrRemoved { - logrus.Warnf("Container %s does not exist: %v", ctr.ID(), err) - } else { - logrus.Errorf("Error removing container %s: %v", ctr.ID(), err) - } - } - } - - return exitCode, nil -} - -// Ps ... 
-func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) { - maxWorkers := shared.Parallelize("ps") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum workers to %d", maxWorkers) - return shared.GetPsContainerOutput(r.Runtime, opts, c.Filter, maxWorkers) -} - -// Attach ... -func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error { - var ( - ctr *libpod.Container - err error - ) - - if c.Latest { - ctr, err = r.Runtime.GetLatestContainer() - } else { - ctr, err = r.Runtime.LookupContainer(c.InputArgs[0]) - } - - if err != nil { - return errors.Wrapf(err, "unable to exec into %s", c.InputArgs[0]) - } - - conState, err := ctr.State() - if err != nil { - return errors.Wrapf(err, "unable to determine state of %s", ctr.ID()) - } - if conState != define.ContainerStateRunning { - return errors.Errorf("you can only attach to running containers") - } - - inputStream := os.Stdin - if c.NoStdin { - inputStream = nil - } - - keys := c.DetachKeys - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return err - } - } - - // If the container is in a pod, also set to recursively start dependencies - if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, keys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach { - return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) - } - return nil -} - -// Checkpoint one or more containers -func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error { - var ( - containers []*libpod.Container - err, lastError error - ) - - options := libpod.ContainerCheckpointOptions{ - Keep: c.Keep, - KeepRunning: c.LeaveRunning, - TCPEstablished: c.TcpEstablished, - TargetFile: c.Export, - IgnoreRootfs: c.IgnoreRootfs, - } - if c.Export == "" && c.IgnoreRootfs { - return errors.Errorf("--ignore-rootfs can only be used with --export") - } - if c.All { - containers, err = r.Runtime.GetRunningContainers() - } else { - containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - } - if err != nil { - return err - } - - for _, ctr := range containers { - if err = ctr.Checkpoint(context.TODO(), options); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to checkpoint container %v", ctr.ID()) - } else { - fmt.Println(ctr.ID()) - } - } - return lastError -} - -// Restore one or more containers -func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error { - var ( - containers []*libpod.Container - err, lastError error - filterFuncs []libpod.ContainerFilter - ) - - options := libpod.ContainerCheckpointOptions{ - Keep: c.Keep, - TCPEstablished: c.TcpEstablished, - TargetFile: c.Import, - Name: c.Name, - IgnoreRootfs: c.IgnoreRootfs, - IgnoreStaticIP: c.IgnoreStaticIP, - IgnoreStaticMAC: c.IgnoreStaticMAC, - } - - filterFuncs = append(filterFuncs, func(c *libpod.Container) bool { - state, _ := c.State() - return state == define.ContainerStateExited - }) - - switch { - case c.Import != "": - containers, err = checkpoint.CRImportCheckpoint(ctx, r.Runtime, c.Import, c.Name) - case c.All: - containers, err = r.GetContainers(filterFuncs...) 
- default: - containers, err = shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime) - } - if err != nil { - return err - } - - for _, ctr := range containers { - if err = ctr.Restore(context.TODO(), options); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to restore container %v", ctr.ID()) - } else { - fmt.Println(ctr.ID()) - } - } - return lastError -} - -// Start will start a container -func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) { - var ( - exitCode = define.ExecErrorCodeGeneric - lastError error - ) - - args := c.InputArgs - if c.Latest { - lastCtr, err := r.GetLatestContainer() - if err != nil { - return 0, errors.Wrapf(err, "unable to get latest container") - } - args = append(args, lastCtr.ID()) - } - - for _, container := range args { - ctr, err := r.LookupContainer(container) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "unable to find container %s", container) - continue - } - - ctrState, err := ctr.State() - if err != nil { - return exitCode, errors.Wrapf(err, "unable to get container state") - } - - ctrRunning := ctrState == define.ContainerStateRunning - - if c.Attach { - inputStream := os.Stdin - if !c.Interactive { - if !ctr.Stdin() { - inputStream = nil - } - } - - keys := c.DetachKeys - if !c.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return exitCode, err - } - } - - // attach to the container and also start it not already running - // If the container is in a pod, also set to recursively start dependencies - err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, keys, sigProxy, !ctrRunning, ctr.PodID() != "") - if errors.Cause(err) == define.ErrDetach { - // User manually detached - // Exit cleanly immediately - exitCode = 0 - return exitCode, nil - } - - if errors.Cause(err) == define.ErrWillDeadlock { - logrus.Debugf("Deadlock error: %v", err) - return define.ExitCode(err), errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID()) - } - - if ctrRunning { - return 0, err - } - - if err != nil { - return exitCode, errors.Wrapf(err, "unable to start container %s", ctr.ID()) - } - - if ecode, err := ctr.Wait(); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr { - // Check events - event, err := r.Runtime.GetLastContainerEvent(ctr.ID(), events.Exited) - if err != nil { - logrus.Errorf("Cannot get exit code: %v", err) - exitCode = define.ExecErrorCodeNotFound - } else { - exitCode = event.ContainerExitCode - } - } - } else { - exitCode = int(ecode) - } - - return exitCode, nil - } - // Start the container if it's not running already. - if !ctrRunning { - // Handle non-attach start - // If the container is in a pod, also set to recursively start dependencies - if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - if errors.Cause(err) == define.ErrWillDeadlock { - lastError = errors.Wrapf(err, "please run 'podman system renumber' to resolve deadlocks") - continue - } - lastError = errors.Wrapf(err, "unable to start container %q", container) - continue - } - } - // Check if the container is referenced by ID or by name and print - // it accordingly. 
- if strings.HasPrefix(ctr.ID(), container) { - fmt.Println(ctr.ID()) - } else { - fmt.Println(container) - } - } - return exitCode, lastError -} - -// PauseContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*libpod.Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("pause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - ctrs, err = r.GetRunningContainers() - } else { - ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime) - } - if err != nil { - return ok, failures, err - } - - pool := shared.NewPool("pause", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Pause() - if err != nil { - logrus.Debugf("Failed to pause container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// UnpauseContainers removes container(s) based on CLI inputs. -func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*libpod.Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("pause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - var filterFuncs []libpod.ContainerFilter - filterFuncs = append(filterFuncs, func(c *libpod.Container) bool { - state, _ := c.State() - return state == define.ContainerStatePaused - }) - ctrs, err = r.GetContainers(filterFuncs...) - } else { - ctrs, err = shortcuts.GetContainersByContext(false, false, cli.InputArgs, r.Runtime) - } - if err != nil { - return ok, failures, err - } - - pool := shared.NewPool("pause", maxWorkers, len(ctrs)) - for _, c := range ctrs { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.Unpause() - if err != nil { - logrus.Debugf("Failed to unpause container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// Restart containers without or without a timeout -func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { - var ( - containers []*libpod.Container - restartContainers []*libpod.Container - err error - ) - useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed - inputTimeout := c.Timeout - - // Handle --latest - switch { - case c.Latest: - lastCtr, err := r.Runtime.GetLatestContainer() - if err != nil { - return nil, nil, errors.Wrapf(err, "unable to get latest container") - } - restartContainers = append(restartContainers, lastCtr) - case c.Running: - containers, err = r.GetRunningContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) - case c.All: - containers, err = r.Runtime.GetAllContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) 
- default: - for _, id := range c.InputArgs { - ctr, err := r.Runtime.LookupContainer(id) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, ctr) - } - } - - maxWorkers := shared.DefaultPoolSize("restart") - if c.GlobalIsSet("max-workers") { - maxWorkers = c.GlobalFlags.MaxWorks - } - - logrus.Debugf("Setting maximum workers to %d", maxWorkers) - - // We now have a slice of all the containers to be restarted. Iterate them to - // create restart Funcs with a timeout as needed - pool := shared.NewPool("restart", maxWorkers, len(restartContainers)) - for _, c := range restartContainers { - ctr := c - timeout := ctr.StopTimeout() - if useTimeout { - timeout = inputTimeout - } - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := ctr.RestartWithTimeout(ctx, timeout) - if err != nil { - logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// Top display the running processes of a container -func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) { - var ( - descriptors []string - container *libpod.Container - err error - ) - if cli.Latest { - descriptors = cli.InputArgs - container, err = r.Runtime.GetLatestContainer() - } else { - descriptors = cli.InputArgs[1:] - container, err = r.Runtime.LookupContainer(cli.InputArgs[0]) - } - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup requested container") - } - - return container.Top(descriptors) -} - -// ExecContainer executes a command in the container -func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) { - var ( - ctr *Container - err error - cmd []string - ) - // default invalid command exit code - ec := define.ExecErrorCodeGeneric - - if cli.Latest { - if ctr, err = r.GetLatestContainer(); err != nil { - return ec, err - } - cmd = cli.InputArgs[0:] - } else { - if ctr, err = r.LookupContainer(cli.InputArgs[0]); err != nil { - return ec, err - } - cmd = cli.InputArgs[1:] - } - - if cli.PreserveFDs > 0 { - entries, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return ec, errors.Wrapf(err, "unable to read /proc/self/fd") - } - - m := make(map[int]bool) - for _, e := range entries { - i, err := strconv.Atoi(e.Name()) - if err != nil { - return ec, errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name()) - } - m[i] = true - } - - for i := 3; i < 3+cli.PreserveFDs; i++ { - if _, found := m[i]; !found { - return ec, errors.New("invalid --preserve-fds=N specified. 
Not enough FDs available") - } - } - } - - // Validate given environment variables - env := map[string]string{} - if len(cli.EnvFile) > 0 { - for _, f := range cli.EnvFile { - fileEnv, err := envLib.ParseFile(f) - if err != nil { - return ec, err - } - env = envLib.Join(env, fileEnv) - } - } - cliEnv, err := envLib.ParseSlice(cli.Env) - if err != nil { - return ec, errors.Wrap(err, "error parsing environment variables") - } - env = envLib.Join(env, cliEnv) - - streams := new(define.AttachStreams) - streams.OutputStream = os.Stdout - streams.ErrorStream = os.Stderr - if cli.Interactive { - streams.InputStream = bufio.NewReader(os.Stdin) - streams.AttachInput = true - } - streams.AttachOutput = true - streams.AttachError = true - - keys := cli.DetachKeys - if !cli.IsSet("detach-keys") { - keys, err = r.selectDetachKeys(keys) - if err != nil { - return ec, err - } - } - - ec, err = ExecAttachCtr(ctx, ctr.Container, cli.Tty, cli.Privileged, env, cmd, cli.User, cli.Workdir, streams, uint(cli.PreserveFDs), keys) - return define.TranslateExecErrorToExitCode(ec, err), err -} - -// Prune removes stopped containers -func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, filters []string) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - err error - filterFunc []libpod.ContainerFilter - ) - - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - for _, filter := range filters { - filterSplit := strings.SplitN(filter, "=", 2) - if len(filterSplit) < 2 { - return ok, failures, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", filter) - } - - f, err := shared.GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r.Runtime) - if err != nil { - return ok, failures, err - } - filterFunc = append(filterFunc, f) - } - - containerStateFilter := func(c *libpod.Container) bool { - state, err := c.State() - if err != nil { - logrus.Error(err) - return false - } - if c.PodID() != "" { - return false - } - if state == define.ContainerStateStopped || state == define.ContainerStateExited || - state == define.ContainerStateCreated || state == define.ContainerStateConfigured { - return true - } - return false - } - filterFunc = append(filterFunc, containerStateFilter) - - delContainers, err := r.Runtime.GetContainers(filterFunc...) 
- if err != nil { - return ok, failures, err - } - if len(delContainers) < 1 { - return ok, failures, err - } - pool := shared.NewPool("prune", maxWorkers, len(delContainers)) - for _, c := range delContainers { - ctr := c - pool.Add(shared.Job{ - ID: ctr.ID(), - Fn: func() error { - err := r.Runtime.RemoveContainer(ctx, ctr, false, false) - if err != nil { - logrus.Debugf("Failed to prune container %s: %s", ctr.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// CleanupContainers any leftovers bits of stopped containers -func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - return ok, failures, err - } - - for _, ctr := range ctrs { - if cli.Remove { - err = removeContainer(ctx, ctr, r) - } else { - err = cleanupContainer(ctx, ctr, r) - } - - if err == nil { - ok = append(ok, ctr.ID()) - } else { - failures[ctr.ID()] = err - } - - if cli.RemoveImage { - _, imageName := ctr.Image() - if err := removeContainerImage(ctx, ctr, r); err != nil { - failures[imageName] = err - } else { - ok = append(ok, imageName) - } - } - } - return ok, failures, nil -} - -// Only used when cleaning up containers -func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil { - return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID()) - } - return nil -} - -func cleanupContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - if err := ctr.Cleanup(ctx); err != nil { - return errors.Wrapf(err, "failed to cleanup container %v", ctr.ID()) - } - return nil -} - -func removeContainerImage(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error { - _, imageName := ctr.Image() - ctrImage, err := runtime.NewImageFromLocal(imageName) - if err != nil { - return err - } - _, err = runtime.RemoveImage(ctx, ctrImage, false) - return err -} - -// Port displays port information about existing containers -func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { - var ( - portContainers []*Container - containers []*libpod.Container - err error - ) - - if !c.All { - names := []string{} - if len(c.InputArgs) >= 1 { - names = []string{c.InputArgs[0]} - } - containers, err = shortcuts.GetContainersByContext(false, c.Latest, names, r.Runtime) - } else { - containers, err = r.Runtime.GetRunningContainers() - } - if err != nil { - return nil, err - } - - //Convert libpod containers to adapter Containers - for _, con := range containers { - if state, _ := con.State(); state != define.ContainerStateRunning { - continue - } - portContainers = append(portContainers, &Container{con}) - } - return portContainers, nil -} - -// generateServiceName generates the container name and the service name for systemd service. 
-func generateServiceName(c *cliconfig.GenerateSystemdValues, ctr *libpod.Container, pod *libpod.Pod) (string, string) { - var kind, name, ctrName string - if pod == nil { - kind = "container" - name = ctr.ID() - if c.Name { - name = ctr.Name() - } - ctrName = name - } else { - kind = "pod" - name = pod.ID() - ctrName = ctr.ID() - if c.Name { - name = pod.Name() - ctrName = ctr.Name() - } - } - return ctrName, fmt.Sprintf("%s-%s", kind, name) -} - -// generateSystemdgenContainerInfo is a helper to generate a -// systemdgen.ContainerInfo for `GenerateSystemd`. -func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSystemdValues, nameOrID string, pod *libpod.Pod) (*generate.ContainerInfo, bool, error) { - ctr, err := r.Runtime.LookupContainer(nameOrID) - if err != nil { - return nil, false, err - } - - timeout := ctr.StopTimeout() - if c.Flags().Changed("timeout") || c.Flags().Changed("time") { - timeout = c.StopTimeout - } - - config := ctr.Config() - conmonPidFile := config.ConmonPidFile - if conmonPidFile == "" { - return nil, true, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag") - } - - name, serviceName := generateServiceName(c, ctr, pod) - info := &generate.ContainerInfo{ - ServiceName: serviceName, - ContainerName: name, - RestartPolicy: c.RestartPolicy, - PIDFile: conmonPidFile, - StopTimeout: timeout, - GenerateTimestamp: true, - CreateCommand: config.CreateCommand, - } - - return info, true, nil -} - -// GenerateSystemd creates a unit file for a container or pod. -func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - opts := generate.Options{ - Files: c.Files, - New: c.New, - } - - // First assume it's a container. - if info, found, err := r.generateSystemdgenContainerInfo(c, c.InputArgs[0], nil); found && err != nil { - return "", err - } else if found && err == nil { - return generate.CreateContainerSystemdUnit(info, opts) - } - - // --new does not support pods. - if c.New { - return "", errors.Errorf("error generating systemd unit files: cannot generate generic files for a pod") - } - - // We're either having a pod or garbage. - pod, err := r.Runtime.LookupPod(c.InputArgs[0]) - if err != nil { - return "", err - } - - // Error out if the pod has no infra container, which we require to be the - // main service. - if !pod.HasInfraContainer() { - return "", fmt.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name()) - } - - // Generate a systemdgen.ContainerInfo for the infra container. This - // ContainerInfo acts as the main service of the pod. - infraID, err := pod.InfraContainerID() - if err != nil { - return "", nil - } - podInfo, _, err := r.generateSystemdgenContainerInfo(c, infraID, pod) - if err != nil { - return "", nil - } - - // Compute the container-dependency graph for the Pod. - containers, err := pod.AllContainers() - if err != nil { - return "", err - } - if len(containers) == 0 { - return "", fmt.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name()) - } - graph, err := libpod.BuildContainerGraph(containers) - if err != nil { - return "", err - } - - // Traverse the dependency graph and create systemdgen.ContainerInfo's for - // each container. - containerInfos := []*generate.ContainerInfo{podInfo} - for ctr, dependencies := range graph.DependencyMap() { - // Skip the infra container as we already generated it. 
- if ctr.ID() == infraID { - continue - } - ctrInfo, _, err := r.generateSystemdgenContainerInfo(c, ctr.ID(), nil) - if err != nil { - return "", err - } - // Now add the container's dependencies and at the container as a - // required service of the infra container. - for _, dep := range dependencies { - if dep.ID() == infraID { - ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName) - } else { - _, serviceName := generateServiceName(c, dep, nil) - ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName) - } - } - podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName) - containerInfos = append(containerInfos, ctrInfo) - } - - // Now generate the systemd service for all containers. - builder := strings.Builder{} - for i, info := range containerInfos { - if i > 0 { - builder.WriteByte('\n') - } - out, err := generate.CreateContainerSystemdUnit(info, opts) - if err != nil { - return "", err - } - builder.WriteString(out) - } - - return builder.String(), nil -} - -// GetNamespaces returns namespace information about a container for PS -func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared.Namespace { - return shared.GetNamespaces(container.Pid) -} - -// Commit creates a local image from a container -func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) { - var ( - writer io.Writer - mimeType string - ) - switch c.Format { - case "oci": - mimeType = buildah.OCIv1ImageManifest - if c.Flag("message").Changed { - return "", errors.Errorf("messages are only compatible with the docker image format (-f docker)") - } - case "docker": - mimeType = manifest.DockerV2Schema2MediaType - default: - return "", errors.Errorf("unrecognized image format %q", c.Format) - } - if !c.Quiet { - writer = os.Stderr - } - ctr, err := r.Runtime.LookupContainer(container) - if err != nil { - return "", errors.Wrapf(err, "error looking up container %q", container) - } - - rtc, err := r.Runtime.GetConfig() - if err != nil { - return "", err - } - - sc := image.GetSystemContext(rtc.Engine.SignaturePolicyPath, "", false) - coptions := buildah.CommitOptions{ - SignaturePolicyPath: rtc.Engine.SignaturePolicyPath, - ReportWriter: writer, - SystemContext: sc, - PreferredManifestType: mimeType, - } - options := libpod.ContainerCommitOptions{ - CommitOptions: coptions, - Pause: c.Pause, - IncludeVolumes: c.IncludeVolumes, - Message: c.Message, - Changes: c.Change, - Author: c.Author, - } - newImage, err := ctr.Commit(ctx, imageName, options) - if err != nil { - return "", err - } - return newImage.ID(), nil -} diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go deleted file mode 100644 index 777605896..000000000 --- a/pkg/adapter/containers_remote.go +++ /dev/null @@ -1,1139 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "os" - "strconv" - "syscall" - "time" - - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/logs" - envLib "github.com/containers/libpod/pkg/env" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/pkg/varlinkapi/virtwriter" - "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/docker/docker/pkg/term" - 
"github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" - "golang.org/x/crypto/ssh/terminal" - "k8s.io/client-go/tools/remotecommand" -) - -// Inspect returns an inspect struct from varlink -func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) { - reply, err := iopodman.ContainerInspectData().Call(c.Runtime.Conn, c.ID(), size) - if err != nil { - return nil, err - } - data := define.InspectContainerData{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, err -} - -// ID returns the ID of the container -func (c *Container) ID() string { - return c.config.ID -} - -// Restart a single container -func (c *Container) Restart(timeout int64) error { - _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout) - return err -} - -// Pause a container -func (c *Container) Pause() error { - _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID()) - return err -} - -// Unpause a container -func (c *Container) Unpause() error { - _, err := iopodman.UnpauseContainer().Call(c.Runtime.Conn, c.ID()) - return err -} - -func (c *Container) PortMappings() ([]ocicni.PortMapping, error) { - // First check if the container belongs to a network namespace (like a pod) - // Taken from libpod portmappings() - if len(c.config.NetNsCtr) > 0 { - netNsCtr, err := c.Runtime.LookupContainer(c.config.NetNsCtr) - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID()) - } - return netNsCtr.PortMappings() - } - return c.config.PortMappings, nil -} - -// Config returns a container config -func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig { - // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer - // further looking into it for after devconf. - // The libpod function for this has no errors so we are kind of in a tough - // spot here. Logging the errors for now. - reply, err := iopodman.ContainerConfig().Call(r.Conn, name) - if err != nil { - logrus.Error("call to container.config failed") - } - data := libpod.ContainerConfig{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - logrus.Error("failed to unmarshal container inspect data") - } - return &data - -} - -// ContainerState returns the "state" of the container. -func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { // no-lint - reply, err := iopodman.ContainerStateData().Call(r.Conn, name) - if err != nil { - return nil, err - } - data := libpod.ContainerState{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, err - -} - -// Spec obtains the container spec. 
-func (r *LocalRuntime) Spec(name string) (*specs.Spec, error) { - reply, err := iopodman.Spec().Call(r.Conn, name) - if err != nil { - return nil, err - } - data := specs.Spec{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, nil -} - -// LookupContainers is a wrapper for LookupContainer -func (r *LocalRuntime) LookupContainers(idsOrNames []string) ([]*Container, error) { - var containers []*Container - for _, name := range idsOrNames { - ctr, err := r.LookupContainer(name) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - return containers, nil -} - -// LookupContainer gets basic information about container over a varlink -// connection and then translates it to a *Container -func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) { - state, err := r.ContainerState(idOrName) - if err != nil { - return nil, err - } - config := r.Config(idOrName) - return &Container{ - remoteContainer{ - r, - config, - state, - }, - }, nil -} - -// GetAllContainers returns all containers in a slice -func (r *LocalRuntime) GetAllContainers() ([]*Container, error) { - var containers []*Container - ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{}) - if err != nil { - return nil, err - } - for _, ctr := range ctrs { - container, err := r.LookupContainer(ctr) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) { - var containers []*Container - ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters) - if err != nil { - return nil, err - } - // This is not performance savvy; if this turns out to be a problematic series of lookups, we need to - // create a new endpoint to speed things up - for _, ctr := range ctrs { - container, err := r.LookupContainer(ctr.Id) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -func (r *LocalRuntime) GetLatestContainer() (*Container, error) { - reply, err := iopodman.GetContainersByContext().Call(r.Conn, false, true, nil) - if err != nil { - return nil, err - } - if len(reply) > 0 { - return r.LookupContainer(reply[0]) - } - return nil, errors.New("no containers exist") -} - -// GetArtifact returns a container's artifacts -func (c *Container) GetArtifact(name string) ([]byte, error) { - var data []byte - reply, err := iopodman.ContainerArtifacts().Call(c.Runtime.Conn, c.ID(), name) - if err != nil { - return nil, err - } - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return data, err -} - -// Config returns a container's Config ... same as ctr.Config() -func (c *Container) Config() *libpod.ContainerConfig { - if c.config != nil { - return c.config - } - return c.Runtime.Config(c.ID()) -} - -// Name returns the name of the container -func (c *Container) Name() string { - return c.config.Name -} - -// StopContainers stops requested containers using varlink. 
-// Returns the list of stopped container ids, map of failed to stop container ids + errors, or any non-container error -func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, TranslateError(err) - } - - for _, id := range ids { - if _, err := iopodman.StopContainer().Call(r.Conn, id, int64(cli.Timeout)); err != nil { - transError := TranslateError(err) - if errors.Cause(transError) == define.ErrCtrStopped { - ok = append(ok, id) - continue - } - if errors.Cause(transError) == define.ErrCtrStateInvalid && cli.All { - ok = append(ok, id) - continue - } - failures[id] = err - } else { - // We should be using ID here because in varlink, only successful returns - // include the string id - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// InitContainers initializes container(s) based on Varlink. -// It returns a list of successful ID(s), a map of failed container ID to error, -// or an error if a more general error occurred. -func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, nil, err - } - - for _, id := range ids { - initialized, err := iopodman.InitContainer().Call(r.Conn, id) - if err != nil { - if cli.All { - switch err.(type) { - case *iopodman.InvalidState: - ok = append(ok, initialized) - default: - failures[id] = err - } - } else { - failures[id] = err - } - } else { - ok = append(ok, initialized) - } - } - return ok, failures, nil -} - -// KillContainers sends signal to container(s) based on varlink. -// Returns list of successful id(s), map of failed id(s) + error, or error not from container -func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, err - } - - for _, id := range ids { - killed, err := iopodman.KillContainer().Call(r.Conn, id, int64(signal)) - if err != nil { - failures[id] = err - } else { - ok = append(ok, killed) - } - } - return ok, failures, nil -} - -// RemoveContainer removes container(s) based on varlink inputs. -func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - // Failed to get containers. 
If force is specified, get the containers ID - // and evict them - if !cli.Force { - return nil, nil, TranslateError(err) - } - - for _, ctr := range cli.InputArgs { - logrus.Debugf("Evicting container %q", ctr) - id, err := iopodman.EvictContainer().Call(r.Conn, ctr, cli.Volumes) - if err != nil { - failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id) - continue - } - ok = append(ok, id) - } - return ok, failures, nil - } - - for _, id := range ids { - _, err := iopodman.RemoveContainer().Call(r.Conn, id, cli.Force, cli.Volumes) - if err != nil { - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// UmountRootFilesystems umounts container(s) root filesystems based on varlink inputs -func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) { - ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, nil, err - } - - var ( - ok = []string{} - failures = map[string]error{} - ) - - for _, id := range ids { - err := iopodman.UnmountContainer().Call(r.Conn, id, cli.Force) - if err != nil { - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// WaitOnContainers waits for all given container(s) to stop. -// interval is currently ignored. -func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - ids, err := iopodman.GetContainersByContext().Call(r.Conn, false, cli.Latest, cli.InputArgs) - if err != nil { - return ok, failures, err - } - - for _, id := range ids { - stopped, err := iopodman.WaitContainer().Call(r.Conn, id, int64(interval)) - if err != nil { - failures[id] = err - } else { - ok = append(ok, strconv.FormatInt(stopped, 10)) - } - } - return ok, failures, nil -} - -// BatchContainerOp is wrapper func to mimic shared's function with a similar name meant for libpod -func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) { - // TODO If pod ps ever shows container's sizes, re-enable this code; otherwise it isn't needed - // and would be a perf hit - // data, err := ctr.Inspect(true) - // if err != nil { - // return shared.BatchContainerStruct{}, err - // } - // - // size := new(shared.ContainerSize) - // size.RootFsSize = data.SizeRootFs - // size.RwSize = data.SizeRw - - bcs := shared.BatchContainerStruct{ - ConConfig: ctr.config, - ConState: ctr.state.State, - ExitCode: ctr.state.ExitCode, - Pid: ctr.state.PID, - StartedTime: ctr.state.StartedTime, - ExitedTime: ctr.state.FinishedTime, - // Size: size, - } - return bcs, nil -} - -// Log one or more containers over a varlink connection -func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error { - // GetContainersLogs - reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), c.Tail, c.Timestamps) - if err != nil { - return errors.Wrapf(err, "failed to get container logs") - } - if len(c.InputArgs) > 1 { - options.Multi = true - } - for { - log, flags, err := reply() - if err != nil { - return err - } - if log.Time == "" && log.Msg == "" { - // We got a blank log line which can signal end of stream - break - } - lTime, err := time.Parse(time.RFC3339Nano, log.Time) - if err != 
nil { - return errors.Wrapf(err, "unable to parse time of log %s", log.Time) - } - logLine := logs.LogLine{ - Device: log.Device, - ParseLogType: log.ParseLogType, - Time: lTime, - Msg: log.Msg, - CID: log.Cid, - } - fmt.Println(logLine.String(options)) - if flags&varlink.Continues == 0 { - break - } - } - return nil -} - -// CreateContainer creates a container from the cli over varlink -func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateValues) (string, error) { - results := shared.NewIntermediateLayer(&c.PodmanCommand, true) - return iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink()) -} - -// Run creates a container overvarlink and then starts it -func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) { - // TODO the exit codes for run need to be figured out for remote connections - results := shared.NewIntermediateLayer(&c.PodmanCommand, true) - cid, err := iopodman.CreateContainer().Call(r.Conn, results.MakeVarlink()) - if err != nil { - return exitCode, err - } - if c.Bool("detach") { - if _, err := iopodman.StartContainer().Call(r.Conn, cid); err != nil { - return exitCode, err - } - fmt.Println(cid) - return 0, nil - } - inputStream := os.Stdin - // If -i is not set, clear stdin - if !c.Bool("interactive") { - inputStream = nil - } - exitChan, errChan, err := r.attach(ctx, inputStream, os.Stdout, cid, true, c.String("detach-keys")) - if err != nil { - return exitCode, err - } - exitCode = <-exitChan - finalError := <-errChan - return exitCode, finalError -} - -func ReadExitFile(runtimeTmp, ctrID string) (int, error) { - return 0, define.ErrNotImplemented -} - -// Ps lists containers based on criteria from user -func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]shared.PsContainerOutput, error) { - var psContainers []shared.PsContainerOutput - last := int64(c.Last) - PsOpts := iopodman.PsOpts{ - All: c.All, - Filters: &c.Filter, - Last: &last, - Latest: &c.Latest, - NoTrunc: &c.NoTrunct, - Pod: &c.Pod, - Quiet: &c.Quiet, - Size: &c.Size, - Sort: &c.Sort, - Sync: &c.Sync, - } - containers, err := iopodman.Ps().Call(r.Conn, PsOpts) - if err != nil { - return nil, err - } - for _, ctr := range containers { - createdAt, err := time.Parse(time.RFC3339Nano, ctr.CreatedAt) - if err != nil { - return nil, err - } - exitedAt, err := time.Parse(time.RFC3339Nano, ctr.ExitedAt) - if err != nil { - return nil, err - } - startedAt, err := time.Parse(time.RFC3339Nano, ctr.StartedAt) - if err != nil { - return nil, err - } - containerSize := shared.ContainerSize{ - RootFsSize: ctr.RootFsSize, - RwSize: ctr.RwSize, - } - state, err := define.StringToContainerStatus(ctr.State) - if err != nil { - return nil, err - } - psc := shared.PsContainerOutput{ - ID: ctr.Id, - Image: ctr.Image, - Command: ctr.Command, - Created: ctr.Created, - Ports: ctr.Ports, - Names: ctr.Names, - IsInfra: ctr.IsInfra, - Status: ctr.Status, - State: state, - Pid: int(ctr.PidNum), - Size: &containerSize, - Pod: ctr.Pod, - CreatedAt: createdAt, - ExitedAt: exitedAt, - StartedAt: startedAt, - Labels: ctr.Labels, - PID: ctr.NsPid, - Cgroup: ctr.Cgroup, - IPC: ctr.Ipc, - MNT: ctr.Mnt, - NET: ctr.Net, - PIDNS: ctr.PidNs, - User: ctr.User, - UTS: ctr.Uts, - Mounts: ctr.Mounts, - } - psContainers = append(psContainers, psc) - } - return psContainers, nil -} - -// Attach to a remote terminal -func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) error { - ctr, err := r.LookupContainer(c.InputArgs[0]) - if 
err != nil { - return nil - } - if ctr.state.State != define.ContainerStateRunning { - return errors.New("you can only attach to running containers") - } - inputStream := os.Stdin - if c.NoStdin { - inputStream, err = os.Open(os.DevNull) - if err != nil { - return err - } - } - _, errChan, err := r.attach(ctx, inputStream, os.Stdout, c.InputArgs[0], false, c.DetachKeys) - if err != nil { - return err - } - return <-errChan -} - -// Checkpoint one or more containers -func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error { - if c.Export != "" { - return errors.New("the remote client does not support exporting checkpoints") - } - if c.IgnoreRootfs { - return errors.New("the remote client does not support --ignore-rootfs") - } - - var lastError error - ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - return err - } - if c.All { - // We don't have a great way to get all the running containers, so need to get all and then - // check status on them bc checkpoint considers checkpointing a stopped container an error - var runningIds []string - for _, id := range ids { - ctr, err := r.LookupContainer(id) - if err != nil { - return err - } - if ctr.state.State == define.ContainerStateRunning { - runningIds = append(runningIds, id) - } - } - ids = runningIds - } - - for _, id := range ids { - if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, c.Keep, c.Keep, c.TcpEstablished); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to checkpoint container %v", id) - } else { - fmt.Println(id) - } - } - return lastError -} - -// Restore one or more containers -func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error { - if c.Import != "" { - return errors.New("the remote client does not support importing checkpoints") - } - if c.IgnoreRootfs { - return errors.New("the remote client does not support --ignore-rootfs") - } - - var lastError error - ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - return err - } - if c.All { - // We don't have a great way to get all the exited containers, so need to get all and then - // check status on them bc checkpoint considers restoring a running container an error - var exitedIDs []string - for _, id := range ids { - ctr, err := r.LookupContainer(id) - if err != nil { - return err - } - if ctr.state.State != define.ContainerStateRunning { - exitedIDs = append(exitedIDs, id) - } - } - ids = exitedIDs - } - - for _, id := range ids { - if _, err := iopodman.ContainerRestore().Call(r.Conn, id, c.Keep, c.TcpEstablished); err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to restore container %v", id) - } else { - fmt.Println(id) - } - } - return lastError -} - -// Start starts an already created container -func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigProxy bool) (int, error) { - var ( - finalErr error - exitCode = define.ExecErrorCodeGeneric - ) - // TODO Figure out how to deal with exit codes - inputStream := os.Stdin - if !c.Interactive { - inputStream = nil - } - - containerIDs, err := iopodman.GetContainersByContext().Call(r.Conn, false, c.Latest, c.InputArgs) - if err != nil { - return exitCode, err - } - if len(containerIDs) < 1 { - return exitCode, errors.New("failed to find containers to start") - } - // start.go makes sure that 
if attach, there can be only one ctr - if c.Attach { - exitChan, errChan, err := r.attach(ctx, inputStream, os.Stdout, containerIDs[0], true, c.DetachKeys) - if err != nil { - return exitCode, nil - } - exitCode := <-exitChan - err = <-errChan - return exitCode, err - } - - // TODO the notion of starting a pod container and its deps still needs to be worked through - // Everything else is detached - for _, cid := range containerIDs { - reply, err := iopodman.StartContainer().Call(r.Conn, cid) - if err != nil { - if finalErr != nil { - fmt.Println(err) - } - finalErr = err - } else { - fmt.Println(reply) - } - } - return exitCode, finalErr -} - -func (r *LocalRuntime) attach(ctx context.Context, stdin, stdout *os.File, cid string, start bool, detachKeys string) (chan int, chan error, error) { - var ( - oldTermState *term.State - ) - spec, err := r.Spec(cid) - if err != nil { - return nil, nil, err - } - resize := make(chan remotecommand.TerminalSize, 5) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && spec.Process.Terminal { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return nil, nil, err - } - defer cancel() - defer restoreTerminal(oldTermState) // nolint: errcheck - - logrus.SetFormatter(&RawTtyFormatter{}) - term.SetRawTerminal(os.Stdin.Fd()) // nolint: errcheck - } - - reply, err := iopodman.Attach().Send(r.Conn, varlink.Upgrade, cid, detachKeys, start) - if err != nil { - restoreTerminal(oldTermState) // nolint: errcheck - return nil, nil, err - } - - // See if the server accepts the upgraded connection or returns an error - _, err = reply() - - if err != nil { - restoreTerminal(oldTermState) // nolint: errcheck - return nil, nil, err - } - - ecChan := make(chan int, 1) - errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, stdin, stdout, oldTermState, resize, ecChan) - return ecChan, errChan, nil -} - -// PauseContainers pauses container(s) based on CLI inputs. 
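The batch wrappers in this file (StopContainers, KillContainers, and the pause/unpause functions that follow) all share one shape: resolve the target IDs over varlink, apply the operation per ID, and report successes and per-ID failures separately. A minimal sketch of that shape; applyToAll is a hypothetical helper standing in for the inlined loops around the individual iopodman calls:

    // applyToAll is a hypothetical helper illustrating the ok/failures
    // aggregation used by the batch wrappers in this file; the real code
    // inlines this loop around each iopodman call.
    func applyToAll(ids []string, apply func(id string) error) ([]string, map[string]error) {
        ok := []string{}
        failures := map[string]error{}
        for _, id := range ids {
            if err := apply(id); err != nil {
                failures[id] = err
                continue
            }
            ok = append(ok, id)
        }
        return ok, failures
    }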
-func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.PauseValues) ([]string, map[string]error, error) { - var ( - ok []string - failures = map[string]error{} - ctrs []*Container - err error - ) - - if cli.All { - filters := []string{define.ContainerStateRunning.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - } else { - ctrs, err = r.LookupContainers(cli.InputArgs) - } - if err != nil { - return ok, failures, err - } - - for _, c := range ctrs { - c := c - err := c.Pause() - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// UnpauseContainers unpauses containers based on input -func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.UnpauseValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*Container - err error - ) - - maxWorkers := shared.DefaultPoolSize("unpause") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - if cli.All { - filters := []string{define.ContainerStatePaused.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - } else { - ctrs, err = r.LookupContainers(cli.InputArgs) - } - if err != nil { - return ok, failures, err - } - for _, c := range ctrs { - c := c - err := c.Unpause() - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Restart restarts a container over varlink -func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { - var ( - containers []*Container - restartContainers []*Container - err error - ok = []string{} - failures = map[string]error{} - ) - useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed - inputTimeout := c.Timeout - - if c.Latest { // nolint: gocritic - lastCtr, err := r.GetLatestContainer() - if err != nil { - return nil, nil, errors.Wrapf(err, "unable to get latest container") - } - restartContainers = append(restartContainers, lastCtr) - } else if c.Running { - containers, err = r.LookupContainersWithStatus([]string{define.ContainerStateRunning.String()}) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) - } else if c.All { - containers, err = r.GetAllContainers() - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, containers...) 
- } else { - for _, id := range c.InputArgs { - ctr, err := r.LookupContainer(id) - if err != nil { - return nil, nil, err - } - restartContainers = append(restartContainers, ctr) - } - } - - for _, c := range restartContainers { - c := c - timeout := c.config.StopTimeout - if useTimeout { - timeout = inputTimeout - } - err := c.Restart(int64(timeout)) - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Top display the running processes of a container -func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) { - var ( - ctr *Container - err error - descriptors []string - ) - if cli.Latest { - ctr, err = r.GetLatestContainer() - descriptors = cli.InputArgs - } else { - ctr, err = r.LookupContainer(cli.InputArgs[0]) - descriptors = cli.InputArgs[1:] - } - if err != nil { - return nil, err - } - return iopodman.Top().Call(r.Conn, ctr.ID(), descriptors) -} - -// Prune removes stopped containers -func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, filter []string) ([]string, map[string]error, error) { - - var ( - ok = []string{} - failures = map[string]error{} - ctrs []*Container - err error - ) - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - filters := []string{define.ContainerStateExited.String()} - ctrs, err = r.LookupContainersWithStatus(filters) - if err != nil { - return ok, failures, err - } - for _, c := range ctrs { - c := c - _, err := iopodman.RemoveContainer().Call(r.Conn, c.ID(), false, false) - if err != nil { - failures[c.ID()] = err - } else { - ok = append(ok, c.ID()) - } - } - return ok, failures, nil -} - -// Cleanup any leftovers bits of stopped containers -func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.CleanupValues) ([]string, map[string]error, error) { - return nil, nil, errors.New("container cleanup not supported for remote clients") -} - -// Port displays port information about existing containers -func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { - var ( - containers []*Container - err error - ) - // This one is a bit odd because when all is used, we only use running containers. 
- if !c.All { - containers, err = r.GetContainersByContext(false, c.Latest, c.InputArgs) - } else { - // we need to only use running containers if all - filters := []string{define.ContainerStateRunning.String()} - containers, err = r.LookupContainersWithStatus(filters) - } - if err != nil { - return nil, err - } - return containers, nil -} - -// GenerateSystemd creates a systemd until for a container -func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - return "", errors.New("systemd generation not supported for remote clients") -} - -// GetNamespaces returns namespace information about a container for PS -func (r *LocalRuntime) GetNamespaces(container shared.PsContainerOutput) *shared.Namespace { - ns := shared.Namespace{ - PID: container.PID, - Cgroup: container.Cgroup, - IPC: container.IPC, - MNT: container.MNT, - NET: container.NET, - PIDNS: container.PIDNS, - User: container.User, - UTS: container.UTS, - } - return &ns -} - -// Commit creates a local image from a container -func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, container, imageName string) (string, error) { - var iid string - reply, err := iopodman.Commit().Send(r.Conn, varlink.More, container, imageName, c.Change, c.Author, c.Message, c.Pause, c.Format) - if err != nil { - return "", err - } - for { - responses, flags, err := reply() - if err != nil { - return "", err - } - for _, line := range responses.Logs { - fmt.Fprintln(os.Stderr, line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - return iid, nil -} - -// ExecContainer executes a command in the container -func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecValues) (int, error) { - var ( - oldTermState *term.State - ec = define.ExecErrorCodeGeneric - ) - // default invalid command exit code - // Validate given environment variables - cliEnv, err := envLib.ParseSlice(cli.Env) - if err != nil { - return 0, errors.Wrap(err, "error parsing environment variables") - } - envs := envLib.Slice(cliEnv) - - resize := make(chan remotecommand.TerminalSize, 5) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. 
If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && cli.Tty { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return ec, err - } - defer cancel() - defer restoreTerminal(oldTermState) // nolint: errcheck - - logrus.SetFormatter(&RawTtyFormatter{}) - term.SetRawTerminal(os.Stdin.Fd()) // nolint: errcheck - } - - opts := iopodman.ExecOpts{ - Name: cli.InputArgs[0], - Tty: cli.Tty, - Privileged: cli.Privileged, - Cmd: cli.InputArgs[1:], - User: &cli.User, - Workdir: &cli.Workdir, - Env: &envs, - DetachKeys: &cli.DetachKeys, - } - - inputStream := os.Stdin - if !cli.Interactive { - inputStream = nil - } - - reply, err := iopodman.ExecContainer().Send(r.Conn, varlink.Upgrade, opts) - if err != nil { - return ec, errors.Wrapf(err, "Exec failed to contact service for %s", cli.InputArgs) - } - - _, err = reply() - if err != nil { - return ec, errors.Wrapf(err, "Exec operation failed for %s", cli.InputArgs) - } - ecChan := make(chan int, 1) - errChan := configureVarlinkAttachStdio(r.Conn.Reader, r.Conn.Writer, inputStream, os.Stdout, oldTermState, resize, ecChan) - - ec = <-ecChan - err = <-errChan - - return ec, err -} - -func configureVarlinkAttachStdio(reader *bufio.Reader, writer *bufio.Writer, stdin *os.File, stdout *os.File, oldTermState *term.State, resize chan remotecommand.TerminalSize, ecChan chan int) chan error { // nolint: interfacer - errChan := make(chan error, 1) - // These are the special writers that encode input from the client. - varlinkStdinWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.ToStdin) - varlinkResizeWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.TerminalResize) - varlinkHangupWriter := virtwriter.NewVirtWriteCloser(writer, virtwriter.HangUpFromClient) - - go func() { - // Read from the wire and direct to stdout or stderr - err := virtwriter.Reader(reader, stdout, os.Stderr, nil, nil, ecChan) - defer restoreTerminal(oldTermState) // nolint: errcheck - sendGenericError(ecChan) - errChan <- err - }() - - go func() { - for termResize := range resize { - b, err := json.Marshal(termResize) - if err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck,staticcheck - sendGenericError(ecChan) - errChan <- err - } - _, err = varlinkResizeWriter.Write(b) - if err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck,staticcheck - sendGenericError(ecChan) - errChan <- err - } - } - }() - if stdin != nil { - // Takes stdinput and sends it over the wire after being encoded - go func() { - if _, err := io.Copy(varlinkStdinWriter, stdin); err != nil { - defer restoreTerminal(oldTermState) // nolint: errcheck - sendGenericError(ecChan) - errChan <- err - } - _, err := varlinkHangupWriter.Write([]byte("EOF")) - if err != nil { - logrus.Errorf("unable to notify server to hangup: %q", err) - } - err = varlinkStdinWriter.Close() - errChan <- err - }() - } - return errChan -} - -func sendGenericError(ecChan chan int) { - if ecChan != nil { - ecChan <- define.ExecErrorCodeGeneric - } -} diff --git a/pkg/adapter/errors.go b/pkg/adapter/errors.go deleted file mode 100644 index 012d01d39..000000000 --- a/pkg/adapter/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/pkg/errors" -) - -// TranslateMapErrors translates the errors a typical podman output struct -// from varlink errors to libpod errors -func 
TranslateMapErrors(failures map[string]error) map[string]error { - for k, v := range failures { - failures[k] = TranslateError(v) - } - return failures -} - -// TranslateError converts a single varlink error to a libpod error -func TranslateError(err error) error { - switch err.(type) { - case *iopodman.ContainerNotFound: - return errors.Wrap(define.ErrNoSuchCtr, err.Error()) - case *iopodman.ErrCtrStopped: - return errors.Wrap(define.ErrCtrStopped, err.Error()) - case *iopodman.InvalidState: - return errors.Wrap(define.ErrCtrStateInvalid, err.Error()) - } - return err -} diff --git a/pkg/adapter/images_remote.go b/pkg/adapter/images_remote.go deleted file mode 100644 index 2df0ffcde..000000000 --- a/pkg/adapter/images_remote.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "context" - "encoding/json" - - "github.com/containers/libpod/pkg/inspect" - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Inspect returns returns an ImageData struct from over a varlink connection -func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error) { - reply, err := iopodman.InspectImage().Call(i.Runtime.Conn, i.ID()) - if err != nil { - return nil, err - } - data := inspect.ImageData{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - return &data, nil -} diff --git a/pkg/adapter/info_remote.go b/pkg/adapter/info_remote.go deleted file mode 100644 index 549b01f54..000000000 --- a/pkg/adapter/info_remote.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Info returns information for the host system and its components -func (r RemoteRuntime) Info() (*define.Info, error) { - // TODO the varlink implementation for info should be updated to match the output for regular info - var ( - reply define.Info - ) - - info, err := iopodman.GetInfo().Call(r.Conn) - if err != nil { - return nil, err - } - hostInfo := define.HostInfo{ - Arch: info.Host.Arch, - BuildahVersion: info.Host.Buildah_version, - CPUs: int(info.Host.Cpus), - Distribution: define.DistributionInfo{ - Distribution: info.Host.Distribution.Distribution, - Version: info.Host.Distribution.Version, - }, - EventLogger: info.Host.Eventlogger, - Hostname: info.Host.Hostname, - Kernel: info.Host.Kernel, - MemFree: info.Host.Mem_free, - MemTotal: info.Host.Mem_total, - OS: info.Host.Os, - SwapFree: info.Host.Swap_free, - SwapTotal: info.Host.Swap_total, - Uptime: info.Host.Uptime, - } - storeInfo := define.StoreInfo{ - ContainerStore: define.ContainerStore{ - Number: int(info.Store.Containers), - }, - GraphDriverName: info.Store.Graph_driver_name, - GraphRoot: info.Store.Graph_root, - ImageStore: define.ImageStore{ - Number: int(info.Store.Images), - }, - RunRoot: info.Store.Run_root, - } - reply.Host = &hostInfo - reply.Store = &storeInfo - regs := make(map[string]interface{}) - if len(info.Registries.Search) > 0 { - regs["search"] = info.Registries.Search - } - if len(info.Registries.Blocked) > 0 { - regs["blocked"] = info.Registries.Blocked - } - if len(info.Registries.Insecure) > 0 { - regs["insecure"] = info.Registries.Insecure - } - reply.Registries = regs - return &reply, nil -} diff --git a/pkg/adapter/network.go b/pkg/adapter/network.go deleted file mode 100644 index 577ffe19f..000000000 --- a/pkg/adapter/network.go +++ /dev/null @@ -1,277 +0,0 @@ -// +build !remoteclient - -package adapter - 
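The TranslateError helper above is what lets the remote wrappers compare varlink failures against libpod's sentinel errors (for example, treating an already-stopped container as a success). A minimal usage sketch, assuming the *varlink.Connection and the iopodman, define, and errors imports already used by these files; stopQuietly itself is a hypothetical name:

    // stopQuietly is a hypothetical illustration of TranslateError: the raw
    // varlink error is translated first, then compared to a libpod sentinel.
    func stopQuietly(conn *varlink.Connection, id string, timeout int64) error {
        if _, err := iopodman.StopContainer().Call(conn, id, timeout); err != nil {
            if errors.Cause(TranslateError(err)) == define.ErrCtrStopped {
                return nil // the container was already stopped
            }
            return err
        }
        return nil
    }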
-import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "text/tabwriter" - - cniversion "github.com/containernetworking/cni/pkg/version" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/pkg/network" - "github.com/containers/libpod/pkg/util" - "github.com/pkg/errors" -) - -func getCNIConfDir(r *LocalRuntime) (string, error) { - config, err := r.GetConfig() - if err != nil { - return "", err - } - configPath := config.Network.NetworkConfigDir - - if len(config.Network.NetworkConfigDir) < 1 { - configPath = network.CNIConfigDir - } - return configPath, nil -} - -// NetworkList displays summary information about CNI networks -func (r *LocalRuntime) NetworkList(cli *cliconfig.NetworkListValues) error { - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return err - } - networks, err := network.LoadCNIConfsFromDir(cniConfigPath) - if err != nil { - return err - } - // quiet means we only print the network names - if cli.Quiet { - for _, cniNetwork := range networks { - fmt.Println(cniNetwork.Name) - } - return nil - } - w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) - if _, err := fmt.Fprintln(w, "NAME\tVERSION\tPLUGINS"); err != nil { - return err - } - for _, cniNetwork := range networks { - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", cniNetwork.Name, cniNetwork.CNIVersion, network.GetCNIPlugins(cniNetwork)); err != nil { - return err - } - } - return w.Flush() -} - -// NetworkInspect displays the raw CNI configuration for one -// or more CNI networks -func (r *LocalRuntime) NetworkInspect(cli *cliconfig.NetworkInspectValues) error { - var ( - rawCNINetworks []map[string]interface{} - ) - for _, name := range cli.InputArgs { - rawList, err := network.InspectNetwork(name) - if err != nil { - return err - } - rawCNINetworks = append(rawCNINetworks, rawList) - } - out, err := json.MarshalIndent(rawCNINetworks, "", "\t") - if err != nil { - return err - } - fmt.Printf("%s\n", out) - return nil -} - -// NetworkRemove deletes one or more CNI networks -func (r *LocalRuntime) NetworkRemove(ctx context.Context, cli *cliconfig.NetworkRmValues) ([]string, map[string]error, error) { - var ( - networkRmSuccesses []string - lastError error - ) - networkRmErrors := make(map[string]error) - - for _, name := range cli.InputArgs { - containers, err := r.GetAllContainers() - if err != nil { - return networkRmSuccesses, networkRmErrors, err - } - // We need to iterate containers looking to see if they belong to the given network - for _, c := range containers { - if util.StringInSlice(name, c.Config().Networks) { - // if user passes force, we nuke containers - if !cli.Force { - // Without the force option, we return an error - return nil, nil, errors.Errorf("%q has associated containers with it. 
Use -f to forcibly delete containers", name) - } - if err := r.RemoveContainer(ctx, c.Container, true, true); err != nil { - return nil, nil, err - } - } - } - if err := network.RemoveNetwork(name); err != nil { - if lastError != nil { - networkRmErrors[name] = lastError - } - lastError = err - } else { - networkRmSuccesses = append(networkRmSuccesses, fmt.Sprintf("Deleted: %s\n", name)) - } - } - return networkRmSuccesses, networkRmErrors, lastError -} - -// NetworkCreateBridge creates a CNI network -func (r *LocalRuntime) NetworkCreateBridge(cli *cliconfig.NetworkCreateValues) (string, error) { - isGateway := true - ipMasq := true - subnet := &cli.Network - ipRange := cli.IPRange - runtimeConfig, err := r.GetConfig() - if err != nil { - return "", err - } - // if range is provided, make sure it is "in" network - if cli.IsSet("subnet") { - // if network is provided, does it conflict with existing CNI or live networks - err = network.ValidateUserNetworkIsAvailable(subnet) - } else { - // if no network is provided, figure out network - subnet, err = network.GetFreeNetwork() - } - if err != nil { - return "", err - } - - gateway := cli.Gateway - if gateway == nil { - // if no gateway is provided, provide it as first ip of network - gateway = network.CalcGatewayIP(subnet) - } - // if network is provided and if gateway is provided, make sure it is "in" network - if cli.IsSet("subnet") && cli.IsSet("gateway") { - if !subnet.Contains(gateway) { - return "", errors.Errorf("gateway %s is not in valid for subnet %s", gateway.String(), subnet.String()) - } - } - if cli.Internal { - isGateway = false - ipMasq = false - } - - // if a range is given, we need to ensure it is "in" the network range. - if cli.IsSet("ip-range") { - if !cli.IsSet("subnet") { - return "", errors.New("you must define a subnet range to define an ip-range") - } - firstIP, err := network.FirstIPInSubnet(&cli.IPRange) - if err != nil { - return "", err - } - lastIP, err := network.LastIPInSubnet(&cli.IPRange) - if err != nil { - return "", err - } - if !subnet.Contains(firstIP) || !subnet.Contains(lastIP) { - return "", errors.Errorf("the ip range %s does not fall within the subnet range %s", cli.IPRange.String(), subnet.String()) - } - } - bridgeDeviceName, err := network.GetFreeDeviceName() - if err != nil { - return "", err - } - // If no name is given, we give the name of the bridge device - name := bridgeDeviceName - if len(cli.InputArgs) > 0 { - name = cli.InputArgs[0] - netNames, err := network.GetNetworkNamesFromFileSystem() - if err != nil { - return "", err - } - if util.StringInSlice(name, netNames) { - return "", errors.Errorf("the network name %s is already used", name) - } - } - - ncList := network.NewNcList(name, cniversion.Current()) - var plugins []network.CNIPlugins - var routes []network.IPAMRoute - - defaultRoute, err := network.NewIPAMDefaultRoute() - if err != nil { - return "", err - } - routes = append(routes, defaultRoute) - ipamConfig, err := network.NewIPAMHostLocalConf(subnet, routes, ipRange, gateway) - if err != nil { - return "", err - } - - // TODO need to iron out the role of isDefaultGW and IPMasq - bridge := network.NewHostLocalBridge(bridgeDeviceName, isGateway, false, ipMasq, ipamConfig) - plugins = append(plugins, bridge) - plugins = append(plugins, network.NewPortMapPlugin()) - plugins = append(plugins, network.NewFirewallPlugin()) - // if we find the dnsname plugin, we add configuration for it - if network.HasDNSNamePlugin(runtimeConfig.Network.CNIPluginDirs) && !cli.DisableDNS { - // Note: 
in the future we might like to allow for dynamic domain names - plugins = append(plugins, network.NewDNSNamePlugin(network.DefaultPodmanDomainName)) - } - ncList["plugins"] = plugins - b, err := json.MarshalIndent(ncList, "", " ") - if err != nil { - return "", err - } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) - err = ioutil.WriteFile(cniPathName, b, 0644) - return cniPathName, err -} - -// NetworkCreateMacVLAN creates a CNI network -func (r *LocalRuntime) NetworkCreateMacVLAN(cli *cliconfig.NetworkCreateValues) (string, error) { - var ( - name string - plugins []network.CNIPlugins - ) - liveNetNames, err := network.GetLiveNetworkNames() - if err != nil { - return "", err - } - // Make sure the host-device exists - if !util.StringInSlice(cli.MacVLAN, liveNetNames) { - return "", errors.Errorf("failed to find network interface %q", cli.MacVLAN) - } - if len(cli.InputArgs) > 0 { - name = cli.InputArgs[0] - netNames, err := network.GetNetworkNamesFromFileSystem() - if err != nil { - return "", err - } - if util.StringInSlice(name, netNames) { - return "", errors.Errorf("the network name %s is already used", name) - } - } - if len(name) < 1 { - name, err = network.GetFreeDeviceName() - if err != nil { - return "", err - } - } - ncList := network.NewNcList(name, cniversion.Current()) - macvlan := network.NewMacVLANPlugin(cli.MacVLAN) - plugins = append(plugins, macvlan) - ncList["plugins"] = plugins - b, err := json.MarshalIndent(ncList, "", " ") - if err != nil { - return "", err - } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) - err = ioutil.WriteFile(cniPathName, b, 0644) - return cniPathName, err -} diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go deleted file mode 100644 index 7c2a84cc7..000000000 --- a/pkg/adapter/pods.go +++ /dev/null @@ -1,1065 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - "strings" - - "github.com/containers/buildah/pkg/parse" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/adapter/shortcuts" - ann "github.com/containers/libpod/pkg/annotations" - envLib "github.com/containers/libpod/pkg/env" - ns "github.com/containers/libpod/pkg/namespaces" - createconfig "github.com/containers/libpod/pkg/spec" - "github.com/containers/libpod/pkg/util" - "github.com/containers/storage" - "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/ghodss/yaml" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" -) - -const ( - // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath - createDirectoryPermission = 0755 - // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath - createFilePermission = 0644 -) - -// PodContainerStats is struct containing an adapter Pod and a libpod -// ContainerStats and is used primarily for outputting pod stats. 
-type PodContainerStats struct { - Pod *Pod - ContainerStats map[string]*libpod.ContainerStats -} - -// PrunePods removes pods -func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - - maxWorkers := shared.DefaultPoolSize("rm") - if cli.GlobalIsSet("max-workers") { - maxWorkers = cli.GlobalFlags.MaxWorks - } - logrus.Debugf("Setting maximum rm workers to %d", maxWorkers) - - states := []string{define.PodStateStopped, define.PodStateExited} - if cli.Force { - states = append(states, define.PodStateRunning) - } - - pods, err := r.GetPodsByStatus(states) - if err != nil { - return ok, failures, err - } - if len(pods) < 1 { - return ok, failures, nil - } - - pool := shared.NewPool("pod_prune", maxWorkers, len(pods)) - for _, p := range pods { - p := p - - pool.Add(shared.Job{ - ID: p.ID(), - Fn: func() error { - err := r.Runtime.RemovePod(ctx, p, true, cli.Force) - if err != nil { - logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error()) - } - return err - }, - }) - } - return pool.Run() -} - -// RemovePods ... -func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) { - errs = append(errs, err) - return nil, errs - } - - for _, p := range pods { - if err := r.Runtime.RemovePod(ctx, p, true, cli.Force); err != nil { - errs = append(errs, err) - } else { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// GetLatestPod gets the latest pod and wraps it in an adapter pod -func (r *LocalRuntime) GetLatestPod() (*Pod, error) { - pod := Pod{} - p, err := r.Runtime.GetLatestPod() - pod.Pod = p - return &pod, err -} - -// GetPodsWithFilters gets the filtered list of pods based on the filter parameters provided. 
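PrunePods above fans its removals out through the shared worker pool rather than looping serially. A condensed sketch of that pattern, using the shared.NewPool and shared.Job API shown above; runJobs and its work callback are hypothetical names:

    // runJobs is a hypothetical wrapper around the shared.Pool pattern used
    // by PrunePods above: queue one Job per ID, then collect the results.
    func runJobs(name string, maxWorkers int, ids []string, work func(string) error) ([]string, map[string]error, error) {
        pool := shared.NewPool(name, maxWorkers, len(ids))
        for _, id := range ids {
            id := id // capture the loop variable for the closure
            pool.Add(shared.Job{
                ID: id,
                Fn: func() error { return work(id) },
            })
        }
        return pool.Run()
    }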
-func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { - pods, err := shared.GetPodsWithFilters(r.Runtime, filters) - if err != nil { - return nil, err - } - return r.podstoAdapterPods(pods) -} - -func (r *LocalRuntime) podstoAdapterPods(pod []*libpod.Pod) ([]*Pod, error) { - var pods []*Pod - for _, i := range pod { - - pods = append(pods, &Pod{i}) - } - return pods, nil -} - -// GetAllPods gets all pods and wraps it in an adapter pod -func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { - allPods, err := r.Runtime.GetAllPods() - if err != nil { - return nil, err - } - return r.podstoAdapterPods(allPods) -} - -// LookupPod gets a pod by name or id and wraps it in an adapter pod -func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) { - pod := Pod{} - p, err := r.Runtime.LookupPod(nameOrID) - pod.Pod = p - return &pod, err -} - -// StopPods is a wrapper to libpod to stop pods based on a cli context -func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) { - timeout := -1 - if cli.Flags().Changed("timeout") { - timeout = int(cli.Timeout) - } - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) { - errs = append(errs, err) - return nil, errs - } - - for _, p := range pods { - stopped := true - conErrs, stopErr := p.StopWithTimeout(ctx, true, timeout) - if stopErr != nil { - errs = append(errs, stopErr) - stopped = false - } - if conErrs != nil { - stopped = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if stopped { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// KillPods is a wrapper to libpod to start pods based on the cli context -func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - errs = append(errs, err) - return nil, errs - } - for _, p := range pods { - killed := true - conErrs, killErr := p.Kill(signal) - if killErr != nil { - errs = append(errs, killErr) - killed = false - } - if conErrs != nil { - killed = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if killed { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// StartPods is a wrapper to start pods based on the cli context -func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) { - var ( - errs []error - podids []string - ) - pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime) - if err != nil { - errs = append(errs, err) - return nil, errs - } - for _, p := range pods { - started := true - conErrs, startErr := p.Start(ctx) - if startErr != nil { - errs = append(errs, startErr) - started = false - } - if conErrs != nil { - started = false - for _, err := range conErrs { - errs = append(errs, err) - } - } - if started { - podids = append(podids, p.ID()) - } - } - return podids, errs -} - -// CreatePod is a wrapper for libpod and creating a new pod from the cli context -func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) { - var ( - options []libpod.PodCreateOption - err error - ) - - // This needs to be first, as a lot of 
options depend on - // WithInfraContainer() - if cli.Infra { - options = append(options, libpod.WithInfraContainer()) - nsOptions, err := shared.GetNamespaceOptions(strings.Split(cli.Share, ",")) - if err != nil { - return "", err - } - options = append(options, nsOptions...) - } - - if cli.Flag("cgroup-parent").Changed { - options = append(options, libpod.WithPodCgroupParent(cli.CgroupParent)) - } - - if len(labels) != 0 { - options = append(options, libpod.WithPodLabels(labels)) - } - - if cli.Flag("name").Changed { - options = append(options, libpod.WithPodName(cli.Name)) - } - - if cli.Flag("hostname").Changed { - options = append(options, libpod.WithPodHostname(cli.Hostname)) - } - - if cli.Flag("add-host").Changed { - options = append(options, libpod.WithPodHosts(cli.StringSlice("add-host"))) - } - if cli.Flag("dns").Changed { - dns := cli.StringSlice("dns") - foundHost := false - for _, entry := range dns { - if entry == "host" { - foundHost = true - } - } - if foundHost && len(dns) > 1 { - return "", errors.Errorf("cannot set dns=host and still provide other DNS servers") - } - if foundHost { - options = append(options, libpod.WithPodUseImageResolvConf()) - } else { - options = append(options, libpod.WithPodDNS(cli.StringSlice("dns"))) - } - } - if cli.Flag("dns-opt").Changed { - options = append(options, libpod.WithPodDNSOption(cli.StringSlice("dns-opt"))) - } - if cli.Flag("dns-search").Changed { - options = append(options, libpod.WithPodDNSSearch(cli.StringSlice("dns-search"))) - } - if cli.Flag("ip").Changed { - ip := net.ParseIP(cli.String("ip")) - if ip == nil { - return "", errors.Errorf("invalid IP address %q passed to --ip", cli.String("ip")) - } - - options = append(options, libpod.WithPodStaticIP(ip)) - } - if cli.Flag("mac-address").Changed { - mac, err := net.ParseMAC(cli.String("mac-address")) - if err != nil { - return "", errors.Wrapf(err, "invalid MAC address %q passed to --mac-address", cli.String("mac-address")) - } - - options = append(options, libpod.WithPodStaticMAC(mac)) - } - if cli.Flag("network").Changed { - netValue := cli.String("network") - switch strings.ToLower(netValue) { - case "bridge": - // Do nothing. - // TODO: Maybe this should be split between slirp and - // bridge? Better to wait until someone asks... - logrus.Debugf("Pod using default network mode") - case "host": - logrus.Debugf("Pod will use host networking") - options = append(options, libpod.WithPodHostNetwork()) - case "": - return "", errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks or host") - default: - // We'll assume this is a comma-separated list of CNI - // networks. - networks := strings.Split(netValue, ",") - logrus.Debugf("Pod joining CNI networks: %v", networks) - options = append(options, libpod.WithPodNetworks(networks)) - } - } - if cli.Flag("no-hosts").Changed { - if cli.Bool("no-hosts") { - options = append(options, libpod.WithPodUseImageHosts()) - } - } - - publish := cli.StringSlice("publish") - if len(publish) > 0 { - portBindings, err := shared.CreatePortBindings(publish) - if err != nil { - return "", err - } - options = append(options, libpod.WithInfraContainerPorts(portBindings)) - - } - // always have containers use pod cgroups - // User Opt out is not yet supported - options = append(options, libpod.WithPodCgroups()) - - pod, err := r.NewPod(ctx, options...) 
- if err != nil { - return "", err - } - return pod.ID(), nil -} - -// GetPodStatus is a wrapper to get the status of a local libpod pod -func (p *Pod) GetPodStatus() (string, error) { - return shared.GetPodStatus(p.Pod) -} - -// BatchContainerOp is a wrapper for the shared function of the same name -func BatchContainerOp(ctr *libpod.Container, opts shared.PsOptions) (shared.BatchContainerStruct, error) { - return shared.BatchContainerOp(ctr, opts) -} - -// PausePods is a wrapper for pausing pods via libpod -func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) { - var ( - pauseIDs []string - pauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - pauseErrors = append(pauseErrors, err) - return nil, containerErrors, pauseErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Pause() - if err != nil { - pauseErrors = append(pauseErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - pauseIDs = append(pauseIDs, pod.ID()) - - } - return pauseIDs, containerErrors, pauseErrors -} - -// UnpausePods is a wrapper for unpausing pods via libpod -func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) { - var ( - unpauseIDs []string - unpauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - return nil, containerErrors, unpauseErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Unpause() - if err != nil { - unpauseErrors = append(unpauseErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - unpauseIDs = append(unpauseIDs, pod.ID()) - - } - return unpauseIDs, containerErrors, unpauseErrors -} - -// RestartPods is a wrapper to restart pods via libpod -func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) { - var ( - restartIDs []string - restartErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - if err != nil { - restartErrors = append(restartErrors, err) - return nil, containerErrors, restartErrors - } - - for _, pod := range pods { - ctrErrs, err := pod.Restart(ctx) - if err != nil { - restartErrors = append(restartErrors, err) - continue - } - if ctrErrs != nil { - for ctr, err := range ctrErrs { - containerErrors[ctr] = err - } - continue - } - restartIDs = append(restartIDs, pod.ID()) - - } - return restartIDs, containerErrors, restartErrors - -} - -// PodTop is a wrapper function to call GetPodPidInformation in libpod and return its results -// for output -func (r *LocalRuntime) PodTop(c *cliconfig.PodTopValues, descriptors []string) ([]string, error) { - var ( - pod *Pod - err error - ) - - if c.Latest { - pod, err = r.GetLatestPod() - } else { - pod, err = r.LookupPod(c.InputArgs[0]) - } - if err != nil { - return nil, errors.Wrapf(err, "unable to lookup requested container") - } - podStatus, err := pod.GetPodStatus() - if err != nil { - return nil, errors.Wrapf(err, "unable to get status for pod %s", pod.ID()) - } - if podStatus != "Running" { - return nil, errors.Errorf("pod top can 
only be used on pods with at least one running container") - } - return pod.GetPodPidInformation(descriptors) -} - -// GetStatPods returns pods for use in pod stats -func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error) { - var ( - adapterPods []*Pod - pods []*libpod.Pod - err error - ) - - if len(c.InputArgs) > 0 || c.Latest || c.All { - pods, err = shortcuts.GetPodsByContext(c.All, c.Latest, c.InputArgs, r.Runtime) - } else { - pods, err = r.Runtime.GetRunningPods() - } - if err != nil { - return nil, err - } - // convert libpod pods to adapter pods - for _, p := range pods { - adapterPod := Pod{ - p, - } - adapterPods = append(adapterPods, &adapterPod) - } - return adapterPods, nil -} - -// PlayKubeYAML creates pods and containers from a kube YAML file -func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) { - var ( - containers []*libpod.Container - pod *libpod.Pod - podOptions []libpod.PodCreateOption - podYAML v1.Pod - registryCreds *types.DockerAuthConfig - writer io.Writer - ) - - content, err := ioutil.ReadFile(yamlFile) - if err != nil { - return nil, err - } - - if err := yaml.Unmarshal(content, &podYAML); err != nil { - return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile) - } - - if podYAML.Kind != "Pod" { - return nil, errors.Errorf("Invalid YAML kind: %s. Pod is the only supported Kubernetes YAML kind", podYAML.Kind) - } - - // check for name collision between pod and container - podName := podYAML.ObjectMeta.Name - if podName == "" { - return nil, errors.Errorf("pod does not have a name") - } - for _, n := range podYAML.Spec.Containers { - if n.Name == podName { - fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName) - podName = fmt.Sprintf("%s_pod", podName) - } - } - - podOptions = append(podOptions, libpod.WithInfraContainer()) - podOptions = append(podOptions, libpod.WithPodName(podName)) - // TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml - - hostname := podYAML.Spec.Hostname - if hostname == "" { - hostname = podName - } - podOptions = append(podOptions, libpod.WithPodHostname(hostname)) - - if podYAML.Spec.HostNetwork { - podOptions = append(podOptions, libpod.WithPodHostNetwork()) - } - - nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ",")) - if err != nil { - return nil, err - } - podOptions = append(podOptions, nsOptions...) - podPorts := getPodPorts(podYAML.Spec.Containers) - podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts)) - - if c.Flag("network").Changed { - netValue := c.String("network") - switch strings.ToLower(netValue) { - case "bridge", "host": - return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML") - case "": - return nil, errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks") - default: - // We'll assume this is a comma-separated list of CNI - // networks. - networks := strings.Split(netValue, ",") - logrus.Debugf("Pod joining CNI networks: %v", networks) - podOptions = append(podOptions, libpod.WithPodNetworks(networks)) - } - } - - // Create the Pod - pod, err = r.NewPod(ctx, podOptions...) 
- if err != nil { - return nil, err - } - - podInfraID, err := pod.InfraContainerID() - if err != nil { - return nil, err - } - hasUserns := false - if podInfraID != "" { - podCtr, err := r.GetContainer(podInfraID) - if err != nil { - return nil, err - } - mappings, err := podCtr.IDMappings() - if err != nil { - return nil, err - } - hasUserns = len(mappings.UIDMap) > 0 - } - - namespaces := map[string]string{ - // Disabled during code review per mheon - //"pid": fmt.Sprintf("container:%s", podInfraID), - "net": fmt.Sprintf("container:%s", podInfraID), - "ipc": fmt.Sprintf("container:%s", podInfraID), - "uts": fmt.Sprintf("container:%s", podInfraID), - } - if hasUserns { - namespaces["user"] = fmt.Sprintf("container:%s", podInfraID) - } - if !c.Quiet { - writer = os.Stderr - } - - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: c.CertDir, - } - if c.Flag("tls-verify").Changed { - dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify) - } - - // map from name to mount point - volumes := make(map[string]string) - for _, volume := range podYAML.Spec.Volumes { - hostPath := volume.VolumeSource.HostPath - if hostPath == nil { - return nil, errors.Errorf("HostPath is currently the only supported VolumeSource") - } - if hostPath.Type != nil { - switch *hostPath.Type { - case v1.HostPathDirectoryOrCreate: - if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { - if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil { - return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) - } - } - // Label a newly created volume - if err := libpod.LabelVolumePath(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) - } - case v1.HostPathFileOrCreate: - if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { - f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission) - if err != nil { - return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) - } - if err := f.Close(); err != nil { - logrus.Warnf("Error in closing newly created HostPath file: %v", err) - } - } - // unconditionally label a newly created volume - if err := libpod.LabelVolumePath(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) - } - case v1.HostPathDirectory: - case v1.HostPathFile: - case v1.HostPathUnset: - // do nothing here because we will verify the path exists in validateVolumeHostDir - break - default: - return nil, errors.Errorf("Directories are the only supported HostPath type") - } - } - - if err := parse.ValidateVolumeHostDir(hostPath.Path); err != nil { - return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML") - } - volumes[volume.Name] = hostPath.Path - } - - seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations, c.SeccompProfileRoot) - if err != nil { - return nil, err - } - - for _, container := range podYAML.Spec.Containers { - pullPolicy := util.PullImageMissing - if len(container.ImagePullPolicy) > 0 { - pullPolicy, err = util.ValidatePullType(string(container.ImagePullPolicy)) - if err != nil { - return nil, err - } - } - named, err := reference.ParseNormalizedNamed(container.Image) - if err != nil { - return nil, err - } - // In kube, if the image is tagged with latest, it should always pull - if tagged, isTagged := named.(reference.NamedTagged); isTagged { - if tagged.Tag() == 
image.LatestTag { - pullPolicy = util.PullImageAlways - } - } - newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy) - if err != nil { - return nil, err - } - createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths) - if err != nil { - return nil, err - } - ctr, err := shared.CreateContainerFromCreateConfig(r.Runtime, createConfig, ctx, pod) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - - // start the containers - for _, ctr := range containers { - if err := ctr.Start(ctx, true); err != nil { - // Making this a hard failure here to avoid a mess - // the other containers are in created status - return nil, err - } - } - - // We've now successfully converted this YAML into a pod - // print our pod and containers, signifying we succeeded - fmt.Printf("Pod:\n%s\n", pod.ID()) - if len(containers) == 1 { - fmt.Printf("Container:\n") - } - if len(containers) > 1 { - fmt.Printf("Containers:\n") - } - for _, ctr := range containers { - fmt.Println(ctr.ID()) - } - - if err := playcleanup(ctx, r, pod, nil); err != nil { - logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID()) - } - return nil, nil -} - -func playcleanup(ctx context.Context, runtime *LocalRuntime, pod *libpod.Pod, err error) error { - if err != nil && pod != nil { - return runtime.RemovePod(ctx, pod, true, true) - } - return nil -} - -// getPodPorts converts a slice of kube container descriptions to an -// array of ocicni portmapping descriptions usable in libpod -func getPodPorts(containers []v1.Container) []ocicni.PortMapping { - var infraPorts []ocicni.PortMapping - for _, container := range containers { - for _, p := range container.Ports { - if p.HostPort != 0 && p.ContainerPort == 0 { - p.ContainerPort = p.HostPort - } - if p.Protocol == "" { - p.Protocol = "tcp" - } - portBinding := ocicni.PortMapping{ - HostPort: p.HostPort, - ContainerPort: p.ContainerPort, - Protocol: strings.ToLower(string(p.Protocol)), - } - if p.HostIP != "" { - logrus.Debug("HostIP on port bindings is not supported") - } - // only hostPort is utilized in podman context, all container ports - // are accessible inside the shared network namespace - if p.HostPort != 0 { - infraPorts = append(infraPorts, portBinding) - } - - } - } - return infraPorts -} - -func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) { - if containerYAML.SecurityContext == nil { - return - } - if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { - securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem - } - if containerYAML.SecurityContext.Privileged != nil { - securityConfig.Privileged = *containerYAML.SecurityContext.Privileged - } - - if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { - securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation - } - - if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil { - if seopt.User != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) - } - if seopt.Role != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, 
fmt.Sprintf("label=role:%s", seopt.Role)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) - } - if seopt.Type != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) - } - if seopt.Level != "" { - securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) - securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) - } - } - if caps := containerYAML.SecurityContext.Capabilities; caps != nil { - for _, capability := range caps.Add { - securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability)) - } - for _, capability := range caps.Drop { - securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability)) - } - } - if containerYAML.SecurityContext.RunAsUser != nil { - userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser) - } - if containerYAML.SecurityContext.RunAsGroup != nil { - if userConfig.User == "" { - userConfig.User = "0" - } - userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup) - } -} - -// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container -func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) { - var ( - containerConfig createconfig.CreateConfig - pidConfig createconfig.PidConfig - networkConfig createconfig.NetworkConfig - cgroupConfig createconfig.CgroupConfig - utsConfig createconfig.UtsConfig - ipcConfig createconfig.IpcConfig - userConfig createconfig.UserConfig - securityConfig createconfig.SecurityConfig - ) - - // The default for MemorySwappiness is -1, not 0 - containerConfig.Resources.MemorySwappiness = -1 - - containerConfig.Image = containerYAML.Image - containerConfig.ImageID = newImage.ID() - containerConfig.Name = containerYAML.Name - containerConfig.Tty = containerYAML.TTY - - containerConfig.Pod = podID - - imageData, _ := newImage.Inspect(ctx) - - userConfig.User = "0" - if imageData != nil { - userConfig.User = imageData.Config.User - } - - setupSecurityContext(&securityConfig, &userConfig, containerYAML) - - securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name) - - containerConfig.Command = []string{} - if imageData != nil && imageData.Config != nil { - containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...) - } - if len(containerYAML.Command) != 0 { - containerConfig.Command = append(containerConfig.Command, containerYAML.Command...) - } else if imageData != nil && imageData.Config != nil { - containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...) 
- } - if imageData != nil && len(containerConfig.Command) == 0 { - return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name) - } - - containerConfig.UserCommand = containerConfig.Command - - containerConfig.StopSignal = 15 - - containerConfig.WorkDir = "/" - if imageData != nil { - // FIXME, - // we are currently ignoring imageData.Config.ExposedPorts - containerConfig.BuiltinImgVolumes = imageData.Config.Volumes - if imageData.Config.WorkingDir != "" { - containerConfig.WorkDir = imageData.Config.WorkingDir - } - containerConfig.Labels = imageData.Config.Labels - if imageData.Config.StopSignal != "" { - stopSignal, err := util.ParseSignal(imageData.Config.StopSignal) - if err != nil { - return nil, err - } - containerConfig.StopSignal = stopSignal - } - } - - if containerYAML.WorkingDir != "" { - containerConfig.WorkDir = containerYAML.WorkingDir - } - // If the user does not pass in ID mappings, just set to basics - if userConfig.IDMappings == nil { - userConfig.IDMappings = &storage.IDMappingOptions{} - } - - networkConfig.NetMode = ns.NetworkMode(namespaces["net"]) - ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"]) - utsConfig.UtsMode = ns.UTSMode(namespaces["uts"]) - // disabled in code review per mheon - //containerConfig.PidMode = ns.PidMode(namespaces["pid"]) - userConfig.UsernsMode = ns.UsernsMode(namespaces["user"]) - if len(containerConfig.WorkDir) == 0 { - containerConfig.WorkDir = "/" - } - - containerConfig.Pid = pidConfig - containerConfig.Network = networkConfig - containerConfig.Uts = utsConfig - containerConfig.Ipc = ipcConfig - containerConfig.Cgroup = cgroupConfig - containerConfig.User = userConfig - containerConfig.Security = securityConfig - - annotations := make(map[string]string) - if infraID != "" { - annotations[ann.SandboxID] = infraID - annotations[ann.ContainerType] = ann.ContainerTypeContainer - } - containerConfig.Annotations = annotations - - // Environment Variables - envs := map[string]string{} - if imageData != nil { - imageEnv, err := envLib.ParseSlice(imageData.Config.Env) - if err != nil { - return nil, errors.Wrap(err, "error parsing image environment variables") - } - envs = imageEnv - } - for _, e := range containerYAML.Env { - envs[e.Name] = e.Value - } - containerConfig.Env = envs - - for _, volume := range containerYAML.VolumeMounts { - hostPath, exists := volumes[volume.Name] - if !exists { - return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name) - } - if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil { - return nil, errors.Wrapf(err, "error in parsing MountPath") - } - containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath)) - } - return &containerConfig, nil -} - -// kubeSeccompPaths holds information about a pod YAML's seccomp configuration -// it holds both container and pod seccomp paths -type kubeSeccompPaths struct { - containerPaths map[string]string - podPath string -} - -// findForContainer checks whether a container has a seccomp path configured for it -// if not, it returns the podPath, which should always have a value -func (k *kubeSeccompPaths) findForContainer(ctrName string) string { - if path, ok := k.containerPaths[ctrName]; ok { - return path - } - return k.podPath -} - -// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp -// it parses both pod and container 
level -// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s -func initializeSeccompPaths(annotations map[string]string, profileRoot string) (*kubeSeccompPaths, error) { - seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)} - var err error - if annotations != nil { - for annKeyValue, seccomp := range annotations { - // check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/ - prefixAndCtr := strings.Split(annKeyValue, "/") - if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix { - continue - } else if len(prefixAndCtr) != 2 { - // this could be caused by a user inputting either of - // container.seccomp.security.alpha.kubernetes.io{,/} - // both of which are invalid - return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0]) - } - - path, err := verifySeccompPath(seccomp, profileRoot) - if err != nil { - return nil, err - } - seccompPaths.containerPaths[prefixAndCtr[1]] = path - } - - podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey] - if ok { - seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot) - } else { - seccompPaths.podPath, err = libpod.DefaultSeccompPath() - } - if err != nil { - return nil, err - } - } - return seccompPaths, nil -} - -// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path -// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp -func verifySeccompPath(path string, profileRoot string) (string, error) { - switch path { - case v1.DeprecatedSeccompProfileDockerDefault: - fallthrough - case v1.SeccompProfileRuntimeDefault: - return libpod.DefaultSeccompPath() - case "unconfined": - return path, nil - default: - parts := strings.Split(path, "/") - if parts[0] == "localhost" { - return filepath.Join(profileRoot, parts[1]), nil - } - return "", errors.Errorf("invalid seccomp path: %s", path) - } -} diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go deleted file mode 100644 index 4c6eea9a7..000000000 --- a/pkg/adapter/pods_remote.go +++ /dev/null @@ -1,576 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "context" - "encoding/json" - "strings" - "time" - - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/pkg/varlinkapi" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// PodContainerStats is struct containing an adapter Pod and a libpod -// ContainerStats and is used primarily for outputting pod stats. -type PodContainerStats struct { - Pod *Pod - ContainerStats map[string]*libpod.ContainerStats -} - -// RemovePods removes one or more based on the cli context. 
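Aside: the seccomp handling removed above resolves Kubernetes seccomp annotations into on-disk profile paths. The following standalone sketch is not part of the diff; the annotation value, profile root, and default profile path are made-up inputs, and it only mirrors the resolution rules ("runtime/default"/"docker/default" fall back to a default profile, "unconfined" passes through, "localhost/<name>" resolves under the profile root).

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveSeccompPath mirrors the rules of the removed verifySeccompPath,
// without any libpod dependency.
func resolveSeccompPath(annotation, profileRoot, defaultProfile string) (string, error) {
	switch annotation {
	case "runtime/default", "docker/default":
		return defaultProfile, nil
	case "unconfined":
		return annotation, nil
	default:
		parts := strings.SplitN(annotation, "/", 2)
		if len(parts) == 2 && parts[0] == "localhost" {
			// localhost/<name> is looked up under the configured profile root.
			return filepath.Join(profileRoot, parts[1]), nil
		}
		return "", fmt.Errorf("invalid seccomp path: %s", annotation)
	}
}

func main() {
	// Hypothetical annotation value and paths, for illustration only.
	p, err := resolveSeccompPath("localhost/audit.json", "/var/lib/kubelet/seccomp", "/usr/share/containers/seccomp.json")
	fmt.Println(p, err) // /var/lib/kubelet/seccomp/audit.json <nil>
}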
-func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) { - var ( - rmErrs []error - rmPods []string - ) - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - rmErrs = append(rmErrs, err) - return nil, rmErrs - } - - for _, p := range podIDs { - reply, err := iopodman.RemovePod().Call(r.Conn, p, cli.Force) - if err != nil { - rmErrs = append(rmErrs, err) - } else { - rmPods = append(rmPods, reply) - } - } - return rmPods, rmErrs -} - -// Inspect looks up a pod by name or id and embeds its data into a remote pod -// object. -func (r *LocalRuntime) Inspect(nameOrID string) (*Pod, error) { - reply, err := iopodman.PodStateData().Call(r.Conn, nameOrID) - if err != nil { - return nil, err - } - data := libpod.PodInspect{} - if err := json.Unmarshal([]byte(reply), &data); err != nil { - return nil, err - } - pod := Pod{} - pod.Runtime = r - pod.config = data.Config - pod.state = data.State - pod.containers = data.Containers - return &pod, nil -} - -// GetLatestPod gets the latest pod and wraps it in an adapter pod -func (r *LocalRuntime) GetLatestPod() (*Pod, error) { - reply, err := iopodman.GetPodsByContext().Call(r.Conn, false, true, nil) - if err != nil { - return nil, err - } - if len(reply) > 0 { - return r.Inspect(reply[0]) - } - return nil, errors.New("no pods exist") -} - -// LookupPod gets a pod by name or ID and wraps it in an adapter pod -func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) { - return r.Inspect(nameOrID) -} - -// Inspect, like libpod pod inspect, returns a libpod.PodInspect object from -// the data of a remotepod data struct -func (p *Pod) Inspect() (*libpod.PodInspect, error) { - config := new(libpod.PodConfig) - if err := libpod.JSONDeepCopy(p.remotepod.config, config); err != nil { - return nil, err - } - inspectData := libpod.PodInspect{ - Config: config, - State: p.remotepod.state, - Containers: p.containers, - } - return &inspectData, nil -} - -// StopPods stops pods based on the cli context from the remote client. 
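Aside: the remote pod commands deleted in this hunk all share one shape: resolve a set of pod IDs over varlink, then apply a per-pod call, collecting successes and failures separately so one bad pod does not abort the whole batch. A minimal sketch of that fan-out pattern follows; the function types are illustrative stand-ins, not the real iopodman signatures.

package main

import (
	"errors"
	"fmt"
)

// applyToPods resolves a list of pod IDs, applies op to each one, and keeps
// successes and per-pod errors in separate slices, as the removed
// RemovePods/StopPods/KillPods/StartPods wrappers do.
func applyToPods(resolve func() ([]string, error), op func(id string) (string, error)) ([]string, []error) {
	ids, err := resolve()
	if err != nil {
		// Failing to resolve IDs is fatal for the whole batch.
		return nil, []error{err}
	}
	var ok []string
	var failed []error
	for _, id := range ids {
		reply, err := op(id)
		if err != nil {
			failed = append(failed, err)
			continue
		}
		ok = append(ok, reply)
	}
	return ok, failed
}

func main() {
	// Hypothetical pod IDs and a stop operation that fails for one of them.
	resolve := func() ([]string, error) { return []string{"pod1", "pod2"}, nil }
	stop := func(id string) (string, error) {
		if id == "pod2" {
			return "", errors.New("pod2 is not running")
		}
		return id, nil
	}
	stopped, errs := applyToPods(resolve, stop)
	fmt.Println(stopped, errs) // [pod1] [pod2 is not running]
}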
-func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValues) ([]string, []error) { - var ( - stopErrs []error - stopPods []string - ) - var timeout int64 = -1 - if cli.Flags().Changed("timeout") { - timeout = int64(cli.Timeout) - } - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.StopPod().Call(r.Conn, p, timeout) - if err != nil { - stopErrs = append(stopErrs, err) - } else { - stopPods = append(stopPods, podID) - } - } - return stopPods, stopErrs -} - -// KillPods kills pods over varlink for the remoteclient -func (r *LocalRuntime) KillPods(ctx context.Context, cli *cliconfig.PodKillValues, signal uint) ([]string, []error) { - var ( - killErrs []error - killPods []string - ) - - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.KillPod().Call(r.Conn, p, int64(signal)) - if err != nil { - killErrs = append(killErrs, err) - } else { - killPods = append(killPods, podID) - } - } - return killPods, killErrs -} - -// StartPods starts pods for the remote client over varlink -func (r *LocalRuntime) StartPods(ctx context.Context, cli *cliconfig.PodStartValues) ([]string, []error) { - var ( - startErrs []error - startPods []string - ) - - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs) - if err != nil { - return nil, []error{err} - } - - for _, p := range podIDs { - podID, err := iopodman.StartPod().Call(r.Conn, p) - if err != nil { - startErrs = append(startErrs, err) - } else { - startPods = append(startPods, podID) - } - } - return startPods, startErrs -} - -// CreatePod creates a pod for the remote client over a varlink connection -func (r *LocalRuntime) CreatePod(ctx context.Context, cli *cliconfig.PodCreateValues, labels map[string]string) (string, error) { - var share []string - if cli.Share != "" { - share = strings.Split(cli.Share, ",") - } - pc := iopodman.PodCreate{ - Name: cli.Name, - CgroupParent: cli.CgroupParent, - Labels: labels, - Share: share, - Infra: cli.Infra, - InfraCommand: cli.InfraCommand, - InfraImage: cli.InfraCommand, - Publish: cli.StringSlice("publish"), - } - - return iopodman.CreatePod().Call(r.Conn, pc) -} - -// GetAllPods is a helper function that gets all pods for the remote client -func (r *LocalRuntime) GetAllPods() ([]*Pod, error) { - var pods []*Pod - podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, true, false, []string{}) - if err != nil { - return nil, err - } - for _, p := range podIDs { - pod, err := r.LookupPod(p) - if err != nil { - return nil, err - } - pods = append(pods, pod) - } - return pods, nil -} - -// This is a empty implementation stating remoteclient not yet implemented -func (r *LocalRuntime) GetPodsWithFilters(filters string) ([]*Pod, error) { - return nil, define.ErrNotImplemented -} - -// GetPodsByStatus returns a slice of pods filtered by a libpod status -func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*Pod, error) { - podIDs, err := iopodman.GetPodsByStatus().Call(r.Conn, statuses) - if err != nil { - return nil, err - } - pods := make([]*Pod, 0, len(podIDs)) - for _, p := range podIDs { - pod, err := r.LookupPod(p) - if err != nil { - return nil, err - } - pods = append(pods, pod) - } - return pods, nil -} - -// ID returns the id of a remote pod -func (p 
*Pod) ID() string { - return p.config.ID -} - -// Name returns the name of the remote pod -func (p *Pod) Name() string { - return p.config.Name -} - -// AllContainersByID returns a slice of a pod's container IDs -func (p *Pod) AllContainersByID() ([]string, error) { - var containerIDs []string - for _, ctr := range p.containers { - containerIDs = append(containerIDs, ctr.ID) - } - return containerIDs, nil -} - -// AllContainers returns a pods containers -func (p *Pod) AllContainers() ([]*Container, error) { - var containers []*Container - for _, ctr := range p.containers { - container, err := p.Runtime.LookupContainer(ctr.ID) - if err != nil { - return nil, err - } - containers = append(containers, container) - } - return containers, nil -} - -// Status ... -func (p *Pod) Status() (map[string]define.ContainerStatus, error) { - ctrs := make(map[string]define.ContainerStatus) - for _, i := range p.containers { - var status define.ContainerStatus - switch i.State { - case "exited": - status = define.ContainerStateExited - case "stopped": - status = define.ContainerStateStopped - case "running": - status = define.ContainerStateRunning - case "paused": - status = define.ContainerStatePaused - case "created": - status = define.ContainerStateCreated - case "define.red": - status = define.ContainerStateConfigured - default: - status = define.ContainerStateUnknown - } - ctrs[i.ID] = status - } - return ctrs, nil -} - -// GetPodStatus is a wrapper to get the string version of the status -func (p *Pod) GetPodStatus() (string, error) { - ctrStatuses, err := p.Status() - if err != nil { - return "", err - } - return shared.CreatePodStatusResults(ctrStatuses) -} - -// InfraContainerID returns the ID of the infra container in a pod -func (p *Pod) InfraContainerID() (string, error) { - return p.state.InfraContainerID, nil -} - -// CreatedTime returns the time the container was created as a time.Time -func (p *Pod) CreatedTime() time.Time { - return p.config.CreatedTime -} - -// SharesPID .... 
-func (p *Pod) SharesPID() bool { - return p.config.UsePodPID -} - -// SharesIPC returns whether containers in pod -// default to use IPC namespace of first container in pod -func (p *Pod) SharesIPC() bool { - return p.config.UsePodIPC -} - -// SharesNet returns whether containers in pod -// default to use network namespace of first container in pod -func (p *Pod) SharesNet() bool { - return p.config.UsePodNet -} - -// SharesMount returns whether containers in pod -// default to use PID namespace of first container in pod -func (p *Pod) SharesMount() bool { - return p.config.UsePodMount -} - -// SharesUser returns whether containers in pod -// default to use user namespace of first container in pod -func (p *Pod) SharesUser() bool { - return p.config.UsePodUser -} - -// SharesUTS returns whether containers in pod -// default to use UTS namespace of first container in pod -func (p *Pod) SharesUTS() bool { - return p.config.UsePodUTS -} - -// SharesCgroup returns whether containers in the pod will default to this pod's -// cgroup instead of the default libpod parent -func (p *Pod) SharesCgroup() bool { - return p.config.UsePodCgroup -} - -// CgroupParent returns the pod's CGroup parent -func (p *Pod) CgroupParent() string { - return p.config.CgroupParent -} - -// PausePods pauses a pod using varlink and the remote client -func (r *LocalRuntime) PausePods(c *cliconfig.PodPauseValues) ([]string, map[string]error, []error) { - var ( - pauseIDs []string - pauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - pauseErrors = append(pauseErrors, err) - return nil, containerErrors, pauseErrors - } - for _, pod := range pods { - reply, err := iopodman.PausePod().Call(r.Conn, pod) - if err != nil { - pauseErrors = append(pauseErrors, err) - continue - } - pauseIDs = append(pauseIDs, reply) - } - return pauseIDs, nil, pauseErrors -} - -// UnpausePods unpauses a pod using varlink and the remote client -func (r *LocalRuntime) UnpausePods(c *cliconfig.PodUnpauseValues) ([]string, map[string]error, []error) { - var ( - unpauseIDs []string - unpauseErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - return nil, containerErrors, unpauseErrors - } - for _, pod := range pods { - reply, err := iopodman.UnpausePod().Call(r.Conn, pod) - if err != nil { - unpauseErrors = append(unpauseErrors, err) - continue - } - unpauseIDs = append(unpauseIDs, reply) - } - return unpauseIDs, nil, unpauseErrors -} - -// RestartPods restarts pods using varlink and the remote client -func (r *LocalRuntime) RestartPods(ctx context.Context, c *cliconfig.PodRestartValues) ([]string, map[string]error, []error) { - var ( - restartIDs []string - restartErrors []error - ) - containerErrors := make(map[string]error) - - pods, err := iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - if err != nil { - restartErrors = append(restartErrors, err) - return nil, containerErrors, restartErrors - } - for _, pod := range pods { - reply, err := iopodman.RestartPod().Call(r.Conn, pod) - if err != nil { - restartErrors = append(restartErrors, err) - continue - } - restartIDs = append(restartIDs, reply) - } - return restartIDs, nil, restartErrors -} - -// PodTop gets top statistics for a pod -func (r *LocalRuntime) PodTop(c *cliconfig.PodTopValues, 
descriptors []string) ([]string, error) { - var ( - latest bool - podName string - ) - if c.Latest { - latest = true - } else { - podName = c.InputArgs[0] - } - return iopodman.TopPod().Call(r.Conn, podName, latest, descriptors) -} - -// GetStatPods returns pods for use in pod stats -func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error) { - var ( - pods []*Pod - err error - podIDs []string - running bool - ) - - if len(c.InputArgs) > 0 || c.Latest || c.All { - podIDs, err = iopodman.GetPodsByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs) - } else { - podIDs, err = iopodman.GetPodsByContext().Call(r.Conn, true, false, []string{}) - running = true - } - if err != nil { - return nil, err - } - for _, p := range podIDs { - pod, err := r.Inspect(p) - if err != nil { - return nil, err - } - if running { - status, err := pod.GetPodStatus() - if err != nil { - // if we cannot get the status of the pod, skip and move on - continue - } - if strings.ToUpper(status) != "RUNNING" { - // if the pod is not running, skip and move on as well - continue - } - } - pods = append(pods, pod) - } - return pods, nil -} - -// GetPodStats returns the stats for each of its containers -func (p *Pod) GetPodStats(previousContainerStats map[string]*libpod.ContainerStats) (map[string]*libpod.ContainerStats, error) { - var ( - ok bool - prevStat *libpod.ContainerStats - ) - newContainerStats := make(map[string]*libpod.ContainerStats) - containers, err := p.AllContainers() - if err != nil { - return nil, err - } - for _, c := range containers { - if prevStat, ok = previousContainerStats[c.ID()]; !ok { - prevStat = &libpod.ContainerStats{ContainerID: c.ID()} - } - cStats := iopodman.ContainerStats{ - Id: prevStat.ContainerID, - Name: prevStat.Name, - Cpu: prevStat.CPU, - Cpu_nano: int64(prevStat.CPUNano), - System_nano: int64(prevStat.SystemNano), - Mem_usage: int64(prevStat.MemUsage), - Mem_limit: int64(prevStat.MemLimit), - Mem_perc: prevStat.MemPerc, - Net_input: int64(prevStat.NetInput), - Net_output: int64(prevStat.NetOutput), - Block_input: int64(prevStat.BlockInput), - Block_output: int64(prevStat.BlockOutput), - Pids: int64(prevStat.PIDs), - } - stats, err := iopodman.GetContainerStatsWithHistory().Call(p.Runtime.Conn, cStats) - if err != nil { - return nil, err - } - newStats := varlinkapi.ContainerStatsToLibpodContainerStats(stats) - // If the container wasn't running, don't include it - // but also suppress the error - if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid { // nolint: govet - return nil, err - } - if err == nil { // nolint: govet - newContainerStats[c.ID()] = &newStats - } - } - return newContainerStats, nil -} - -// RemovePod removes a pod -// If removeCtrs is specified, containers will be removed -// Otherwise, a pod that is not empty will return an error and not be removed -// If force is specified with removeCtrs, all containers will be stopped before -// being removed -// Otherwise, the pod will not be removed if any containers are running -func (r *LocalRuntime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) error { - _, err := iopodman.RemovePod().Call(r.Conn, p.ID(), force) - if err != nil { - return err - } - return nil -} - -// PrunePods... 
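Aside: GetPodStats above threads a map of previous per-container samples into the varlink stats call so usage can be computed as a delta between polls. The sketch below shows why that previous sample matters, using a simplified CPU calculation; it is an illustration of delta-based stats in general, not the exact libpod formula.

package main

import "fmt"

// containerSample is a pared-down stand-in for a stats sample: cumulative
// container CPU time and system CPU time, in nanoseconds.
type containerSample struct {
	CPUNano    int64
	SystemNano int64
}

// cpuPercent computes usage between two polls as the container CPU delta
// over the system CPU delta.
func cpuPercent(prev, cur containerSample) float64 {
	cpuDelta := float64(cur.CPUNano - prev.CPUNano)
	sysDelta := float64(cur.SystemNano - prev.SystemNano)
	if sysDelta <= 0 || cpuDelta < 0 {
		return 0
	}
	return cpuDelta / sysDelta * 100
}

func main() {
	// Made-up samples from two consecutive polls.
	prev := containerSample{CPUNano: 1_000_000_000, SystemNano: 40_000_000_000}
	cur := containerSample{CPUNano: 1_500_000_000, SystemNano: 42_000_000_000}
	fmt.Printf("%.1f%%\n", cpuPercent(prev, cur)) // 25.0%
}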
-func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneValues) ([]string, map[string]error, error) { - var ( - ok = []string{} - failures = map[string]error{} - ) - states := []string{define.PodStateStopped, define.PodStateExited} - if cli.Force { - states = append(states, define.PodStateRunning) - } - - ids, err := iopodman.GetPodsByStatus().Call(r.Conn, states) - if err != nil { - return ok, failures, err - } - if len(ids) < 1 { - return ok, failures, nil - } - - for _, id := range ids { - _, err := iopodman.RemovePod().Call(r.Conn, id, cli.Force) - if err != nil { - logrus.Debugf("Failed to remove pod %s: %s", id, err.Error()) - failures[id] = err - } else { - ok = append(ok, id) - } - } - return ok, failures, nil -} - -// PlayKubeYAML creates pods and containers from a kube YAML file -func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) { - return nil, define.ErrNotImplemented -} diff --git a/pkg/adapter/reset.go b/pkg/adapter/reset.go deleted file mode 100644 index 0decc3d15..000000000 --- a/pkg/adapter/reset.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "context" -) - -// Reset the container storage back to initial states. -// Removes all Pods, Containers, Images and Volumes. -func (r *LocalRuntime) Reset() error { - return r.Runtime.Reset(context.TODO()) -} diff --git a/pkg/adapter/reset_remote.go b/pkg/adapter/reset_remote.go deleted file mode 100644 index 284b54a17..000000000 --- a/pkg/adapter/reset_remote.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - iopodman "github.com/containers/libpod/pkg/varlink" -) - -// Info returns information for the host system and its components -func (r RemoteRuntime) Reset() error { - return iopodman.Reset().Call(r.Conn) -} diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go deleted file mode 100644 index 7a181e7e5..000000000 --- a/pkg/adapter/runtime.go +++ /dev/null @@ -1,477 +0,0 @@ -// +build !remoteclient - -package adapter - -import ( - "bufio" - "context" - "io" - "io/ioutil" - "os" - "text/template" - - "github.com/containers/buildah" - "github.com/containers/buildah/imagebuildah" - "github.com/containers/buildah/pkg/formats" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/libpodruntime" - "github.com/containers/libpod/cmd/podman/shared" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/rootless" - "github.com/containers/libpod/pkg/util" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" -) - -// LocalRuntime describes a typical libpod runtime -type LocalRuntime struct { - *libpod.Runtime - Remote bool -} - -// ContainerImage ... -type ContainerImage struct { - *image.Image -} - -// Container ... -type Container struct { - *libpod.Container -} - -// Pod encapsulates the libpod.Pod structure, helps with remote vs. local -type Pod struct { - *libpod.Pod -} - -// Volume ... 
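Aside: the local adapter types removed in runtime.go (LocalRuntime, ContainerImage, Container, Pod, Volume) wrap libpod objects by struct embedding so the CLI can code against one surface whether the backend is local or remote. A toy sketch of that embedding pattern, using a made-up engine type rather than the libpod API:

package main

import "fmt"

// engine stands in for the concrete backend (libpod.Runtime in the removed code).
type engine struct{ name string }

func (e *engine) Ping() string { return "pong from " + e.name }

// LocalRuntime embeds the backend and inherits its methods, while remaining
// free to add CLI-facing helpers of its own.
type LocalRuntime struct {
	*engine
	Remote bool
}

func (r *LocalRuntime) Describe() string {
	if r.Remote {
		return "remote: " + r.Ping()
	}
	return "local: " + r.Ping()
}

func main() {
	r := &LocalRuntime{engine: &engine{name: "libpod"}, Remote: false}
	fmt.Println(r.Describe()) // local: pong from libpod
}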
-type Volume struct { - *libpod.Volume -} - -// VolumeFilter is for filtering volumes on the client -type VolumeFilter func(*Volume) bool - -// GetRuntimeNoStore returns a localruntime struct with an embedded runtime but -// without a configured storage. -func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - runtime, err := libpodruntime.GetRuntimeNoStore(ctx, c) - if err != nil { - return nil, err - } - return getRuntime(runtime) -} - -// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it -func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - runtime, err := libpodruntime.GetRuntime(ctx, c) - if err != nil { - return nil, err - } - return getRuntime(runtime) -} - -func getRuntime(runtime *libpod.Runtime) (*LocalRuntime, error) { - return &LocalRuntime{ - Runtime: runtime, - }, nil -} - -// GetFilteredImages returns a slice of images in containerimages that are "filtered" -func (r *LocalRuntime) GetFilteredImages(filters []string, rwOnly bool) ([]*ContainerImage, error) { - images, err := r.ImageRuntime().GetImagesWithFilters(filters) - if err != nil { - return nil, err - } - return r.ImagestoContainerImages(images, rwOnly) -} - -// GetImages returns a slice of images in containerimages -func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) { - return r.getImages(false) -} - -// GetRWImages returns a slice of read/write images in containerimages -func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) { - return r.getImages(true) -} - -func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) { - images, err := r.Runtime.ImageRuntime().GetImages() - if err != nil { - return nil, err - } - return r.ImagestoContainerImages(images, rwOnly) -} - -// ImagestoContainerImages converts the slice of *image.Image to a slice of -// *ContainerImage. ReadOnly images are skipped when rwOnly is set. 
-func (r *LocalRuntime) ImagestoContainerImages(images []*image.Image, rwOnly bool) ([]*ContainerImage, error) { - var containerImages []*ContainerImage - for _, i := range images { - if rwOnly && i.IsReadOnly() { - continue - } - containerImages = append(containerImages, &ContainerImage{i}) - } - return containerImages, nil -} - -// NewImageFromLocal returns a containerimage representation of a image from local storage -func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) { - img, err := r.Runtime.ImageRuntime().NewFromLocal(name) - if err != nil { - return nil, err - } - return &ContainerImage{img}, nil -} - -// ImageTree reutnrs an new image.Tree for the provided `imageOrID` and `whatrequires` flag -func (r *LocalRuntime) ImageTree(imageOrID string, whatRequires bool) (string, error) { - img, err := r.Runtime.ImageRuntime().NewFromLocal(imageOrID) - if err != nil { - return "", err - } - return img.GenerateTree(whatRequires) -} - -// LoadFromArchiveReference calls into local storage to load an image from an archive -func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) { - var containerImages []*ContainerImage - imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer) - if err != nil { - return nil, err - } - for _, i := range imgs { - ci := ContainerImage{i} - containerImages = append(containerImages, &ci) - } - return containerImages, nil -} - -// New calls into local storage to look for an image in local storage or to pull it -func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, label *string, pullType util.PullType) (*ContainerImage, error) { - img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, label, pullType) - if err != nil { - return nil, err - } - return &ContainerImage{img}, nil -} - -// RemoveImage calls into local storage and removes an image -func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (*image.ImageDeleteResponse, error) { - return r.Runtime.RemoveImage(ctx, img.Image, force) -} - -// PruneImages is wrapper into PruneImages within the image pkg -func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) { - return r.ImageRuntime().PruneImages(ctx, all, filter) -} - -// Export is a wrapper to container export to a tarfile -func (r *LocalRuntime) Export(name string, path string) error { - ctr, err := r.Runtime.LookupContainer(name) - if err != nil { - return errors.Wrapf(err, "error looking up container %q", name) - } - return ctr.Export(path) -} - -// Import is a wrapper to import a container image -func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) { - return r.Runtime.Import(ctx, source, reference, changes, history, quiet) -} - -// CreateVolume is a wrapper to create volumes -func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) { - var ( - options []libpod.VolumeCreateOption - volName string - ) - - if len(c.InputArgs) > 0 { - volName = c.InputArgs[0] - options = append(options, libpod.WithVolumeName(volName)) - } - - if 
c.Flag("driver").Changed { - options = append(options, libpod.WithVolumeDriver(c.Driver)) - } - - if len(labels) != 0 { - options = append(options, libpod.WithVolumeLabels(labels)) - } - - if len(opts) != 0 { - // We need to process -o for uid, gid - parsedOptions, err := shared.ParseVolumeOptions(opts) - if err != nil { - return "", err - } - options = append(options, parsedOptions...) - } - newVolume, err := r.NewVolume(ctx, options...) - if err != nil { - return "", err - } - return newVolume.Name(), nil -} - -// RemoveVolumes is a wrapper to remove volumes -func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, map[string]error, error) { - return shared.SharedRemoveVolumes(ctx, r.Runtime, c.InputArgs, c.All, c.Force) -} - -// Push is a wrapper to push an image to a registry -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { - newImage, err := r.ImageRuntime().NewFromLocal(srcName) - if err != nil { - return err - } - return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil) -} - -// InspectVolumes returns a slice of volumes based on an arg list or --all -func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*libpod.InspectVolumeData, error) { - var ( - volumes []*libpod.Volume - err error - ) - - if c.All { - volumes, err = r.GetAllVolumes() - } else { - for _, v := range c.InputArgs { - vol, err := r.LookupVolume(v) - if err != nil { - return nil, err - } - volumes = append(volumes, vol) - } - } - if err != nil { - return nil, err - } - - inspectVols := make([]*libpod.InspectVolumeData, 0, len(volumes)) - for _, vol := range volumes { - inspectOut, err := vol.Inspect() - if err != nil { - return nil, errors.Wrapf(err, "error inspecting volume %s", vol.Name()) - } - inspectVols = append(inspectVols, inspectOut) - } - - return inspectVols, nil -} - -// Volumes returns a slice of localruntime volumes -func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) { - vols, err := r.GetAllVolumes() - if err != nil { - return nil, err - } - return libpodVolumeToVolume(vols), nil -} - -// libpodVolumeToVolume converts a slice of libpod volumes to a slice -// of localruntime volumes (same as libpod) -func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume { - var vols []*Volume - for _, v := range volumes { - newVol := Volume{ - v, - } - vols = append(vols, &newVol) - } - return vols -} - -// Build is the wrapper to build images -func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) (string, reference.Canonical, error) { - - authfile := c.Authfile - if len(c.Authfile) == 0 { - authfile = os.Getenv("REGISTRY_AUTH_FILE") - } - - options.SystemContext.AuthFilePath = authfile - - if c.GlobalFlags.Runtime != "" { - options.Runtime = c.GlobalFlags.Runtime - } else { - options.Runtime = r.GetOCIRuntimePath() - } - - if c.Quiet { - options.ReportWriter = ioutil.Discard - } - - if rootless.IsRootless() { - options.Isolation = buildah.IsolationOCIRootless - } - - return r.Runtime.Build(ctx, options, dockerfiles...) 
-} - -// PruneVolumes is a wrapper function for libpod PruneVolumes -func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) { - var ( - vids []string - errs []error - ) - reports, err := r.Runtime.PruneVolumes(ctx) - if err != nil { - errs = append(errs, err) - return vids, errs - } - for k, v := range reports { - if v == nil { - vids = append(vids, k) - } else { - errs = append(errs, v) - } - } - return vids, errs -} - -// SaveImage is a wrapper function for saving an image to the local filesystem -func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error { - source := c.InputArgs[0] - additionalTags := c.InputArgs[1:] - - newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source) - if err != nil { - return err - } - return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress) -} - -// LoadImage is a wrapper function for libpod LoadImage -func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { - var ( - writer io.Writer - ) - if !cli.Quiet { - writer = os.Stderr - } - return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy) -} - -// IsImageNotFound checks if the error indicates that no image was found. -func IsImageNotFound(err error) bool { - return errors.Cause(err) == image.ErrNoSuchImage -} - -// HealthCheck is a wrapper to same named function in libpod -func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) { - output := "unhealthy" - status, err := r.Runtime.HealthCheck(c.InputArgs[0]) - if status == libpod.HealthCheckSuccess { - output = "healthy" - } - return output, err -} - -// Events is a wrapper to libpod to obtain libpod/podman events -func (r *LocalRuntime) Events(c *cliconfig.EventValues) error { - var ( - fromStart bool - eventsError error - ) - var tmpl *template.Template - if c.Format != formats.JSONString { - template, err := template.New("events").Parse(c.Format) - if err != nil { - return err - } - tmpl = template - } - if len(c.Since) > 0 || len(c.Until) > 0 { - fromStart = true - } - eventChannel := make(chan *events.Event) - go func() { - readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until} - eventsError = r.Runtime.Events(readOpts) - }() - - if eventsError != nil { - return eventsError - } - w := bufio.NewWriter(os.Stdout) - for event := range eventChannel { - switch { - case c.Format == formats.JSONString: - jsonStr, err := event.ToJSONString() - if err != nil { - return errors.Wrapf(err, "unable to format json") - } - if _, err := w.Write([]byte(jsonStr)); err != nil { - return err - } - case len(c.Format) > 0: - if err := tmpl.Execute(w, event); err != nil { - return err - } - default: - if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil { - return err - } - } - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - if err := w.Flush(); err != nil { - return err - } - } - return nil -} - -// Diff shows the difference in two objects -func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) { - return r.Runtime.GetDiff("", to) -} - -// GenerateKube creates kubernetes email from containers and pods -func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) { - return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime) -} - -// GetPodsByStatus returns a slice of pods filtered by 
a libpod status -func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) { - - filterFunc := func(p *libpod.Pod) bool { - state, _ := shared.GetPodStatus(p) - for _, status := range statuses { - if state == status { - return true - } - } - return false - } - - pods, err := r.Runtime.Pods(filterFunc) - if err != nil { - return nil, err - } - - return pods, nil -} - -// GetVersion is an alias to satisfy interface{} -func (r *LocalRuntime) GetVersion() (define.Version, error) { - return define.GetVersion() -} - -// RemoteEndpoint resolve interface requirement -func (r *LocalRuntime) RemoteEndpoint() (*Endpoint, error) { - return nil, errors.New("RemoteEndpoint() not implemented for local connection") -} diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go deleted file mode 100644 index c511b70f1..000000000 --- a/pkg/adapter/runtime_remote.go +++ /dev/null @@ -1,1108 +0,0 @@ -// +build remoteclient - -package adapter - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "text/template" - "time" - - "github.com/containers/buildah/imagebuildah" - "github.com/containers/buildah/pkg/formats" - "github.com/containers/common/pkg/config" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/remoteclientconfig" - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/libpod/events" - "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/util" - iopodman "github.com/containers/libpod/pkg/varlink" - "github.com/containers/libpod/utils" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/varlink/go/varlink" - v1 "k8s.io/api/core/v1" -) - -// ImageRuntime is wrapper for image runtime -type RemoteImageRuntime struct{} - -// RemoteRuntime describes a wrapper runtime struct -type RemoteRuntime struct { - Conn *varlink.Connection - Remote bool - cmd cliconfig.MainFlags - config io.Reader -} - -// LocalRuntime describes a typical libpod runtime -type LocalRuntime struct { - *RemoteRuntime -} - -// GetRuntimeNoStore returns a LocalRuntime struct with the actual runtime embedded in it -// The nostore is ignored -func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - return GetRuntime(ctx, c) -} - -// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it -func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { - var ( - customConfig bool - err error - f *os.File - ) - runtime := RemoteRuntime{ - Remote: true, - cmd: c.GlobalFlags, - } - configPath := remoteclientconfig.GetConfigFilePath() - // Check if the basedir for configPath exists and if not, create it. - if _, err := os.Stat(filepath.Dir(configPath)); os.IsNotExist(err) { - if mkdirErr := os.MkdirAll(filepath.Dir(configPath), 0750); mkdirErr != nil { - return nil, mkdirErr - } - } - if len(c.GlobalFlags.RemoteConfigFilePath) > 0 { - configPath = c.GlobalFlags.RemoteConfigFilePath - customConfig = true - } - - f, err = os.Open(configPath) - if err != nil { - // If user does not explicitly provide a configuration file path and we cannot - // find a default, no error should occur. 
- if os.IsNotExist(err) && !customConfig { - logrus.Debugf("unable to load configuration file at %s", configPath) - runtime.config = nil - } else { - return nil, errors.Wrapf(err, "unable to load configuration file at %s", configPath) - } - } else { - // create the io reader for the remote client - runtime.config = bufio.NewReader(f) - } - conn, err := runtime.Connect() - if err != nil { - return nil, err - } - runtime.Conn = conn - return &LocalRuntime{ - &runtime, - }, nil -} - -// DeferredShutdown is a bogus wrapper for compaat with the libpod -// runtime and should only be run when a defer is being used -func (r RemoteRuntime) DeferredShutdown(force bool) { - if err := r.Shutdown(force); err != nil { - logrus.Error("unable to shutdown runtime") - } -} - -// Containers is a bogus wrapper for compat with the libpod runtime -type ContainersConfig struct { - // CGroupManager is the CGroup Manager to use - // Valid values are "cgroupfs" and "systemd" - CgroupManager string -} - -// RuntimeConfig is a bogus wrapper for compat with the libpod runtime -type RuntimeConfig struct { - Containers ContainersConfig -} - -// Shutdown is a bogus wrapper for compat with the libpod runtime -func (r *RemoteRuntime) GetConfig() (*config.Config, error) { - return nil, nil -} - -// Shutdown is a bogus wrapper for compat with the libpod runtime -func (r RemoteRuntime) Shutdown(force bool) error { - return nil -} - -// ContainerImage -type ContainerImage struct { - remoteImage -} - -type remoteImage struct { - ID string - Labels map[string]string - RepoTags []string - RepoDigests []string - Parent string - Size int64 - Created time.Time - InputName string - Names []string - Digest digest.Digest - Digests []digest.Digest - isParent bool - Runtime *LocalRuntime - TopLayer string - ReadOnly bool - NamesHistory []string -} - -// Container ... -type Container struct { - remoteContainer -} - -// remoteContainer .... -type remoteContainer struct { - Runtime *LocalRuntime - config *libpod.ContainerConfig - state *libpod.ContainerState -} - -// Pod ... 
-type Pod struct { - remotepod -} - -type remotepod struct { - config *libpod.PodConfig - state *libpod.PodInspectState - containers []libpod.PodContainerInfo // nolint: structcheck - Runtime *LocalRuntime -} - -type VolumeFilter func(*Volume) bool - -// Volume is embed for libpod volumes -type Volume struct { - remoteVolume -} - -type remoteVolume struct { - Runtime *LocalRuntime - config *libpod.VolumeConfig -} - -// GetImages returns a slice of containerimages over a varlink connection -func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) { - return r.getImages(false) -} - -// GetRWImages returns a slice of read/write containerimages over a varlink connection -func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) { - return r.getImages(true) -} - -func (r *LocalRuntime) GetFilteredImages(filters []string, rwOnly bool) ([]*ContainerImage, error) { - if len(filters) > 0 { - return nil, errors.Wrap(define.ErrNotImplemented, "filtering images is not supported on the remote client") - } - var newImages []*ContainerImage - images, err := iopodman.ListImages().Call(r.Conn) - if err != nil { - return nil, err - } - for _, i := range images { - if rwOnly && i.ReadOnly { - continue - } - name := i.Id - if len(i.RepoTags) > 1 { - name = i.RepoTags[0] - } - newImage, err := imageInListToContainerImage(i, name, r) - if err != nil { - return nil, err - } - newImages = append(newImages, newImage) - } - return newImages, nil -} -func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) { - var newImages []*ContainerImage - images, err := iopodman.ListImages().Call(r.Conn) - if err != nil { - return nil, err - } - for _, i := range images { - if rwOnly && i.ReadOnly { - continue - } - name := i.Id - if len(i.RepoTags) > 1 { - name = i.RepoTags[0] - } - newImage, err := imageInListToContainerImage(i, name, r) - if err != nil { - return nil, err - } - newImages = append(newImages, newImage) - } - return newImages, nil -} - -func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRuntime) (*ContainerImage, error) { - created, err := time.ParseInLocation(time.RFC3339, i.Created, time.UTC) - if err != nil { - return nil, err - } - var digests []digest.Digest - for _, d := range i.Digests { - digests = append(digests, digest.Digest(d)) - } - ri := remoteImage{ - InputName: name, - ID: i.Id, - Digest: digest.Digest(i.Digest), - Digests: digests, - Labels: i.Labels, - RepoTags: i.RepoTags, - RepoDigests: i.RepoTags, - Parent: i.ParentId, - Size: i.Size, - Created: created, - Names: i.RepoTags, - isParent: i.IsParent, - Runtime: runtime, - TopLayer: i.TopLayer, - ReadOnly: i.ReadOnly, - NamesHistory: i.History, - } - return &ContainerImage{ri}, nil -} - -// NewImageFromLocal returns a container image representation of a image over varlink -func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) { - img, err := iopodman.GetImage().Call(r.Conn, name) - if err != nil { - return nil, err - } - return imageInListToContainerImage(img, name, r) - -} - -// LoadFromArchiveReference creates an image from a local archive -func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) { - var iid string - creds := iopodman.AuthConfig{} - reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, srcRef.DockerReference().String(), creds) - if err != nil { - return nil, err - } - - for { - responses, flags, err := reply() - if err != nil { 
- return nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - - newImage, err := r.NewImageFromLocal(iid) - if err != nil { - return nil, err - } - return []*ContainerImage{newImage}, nil -} - -// New calls into local storage to look for an image in local storage or to pull it -func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, label *string, pullType util.PullType) (*ContainerImage, error) { - var iid string - if label != nil { - return nil, errors.New("the remote client function does not support checking a remote image for a label") - } - creds := iopodman.AuthConfig{} - if dockeroptions.DockerRegistryCreds != nil { - creds.Username = dockeroptions.DockerRegistryCreds.Username - creds.Password = dockeroptions.DockerRegistryCreds.Password - } - reply, err := iopodman.PullImage().Send(r.Conn, varlink.More, name, creds) - if err != nil { - return nil, err - } - for { - responses, flags, err := reply() - if err != nil { - return nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - iid = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - newImage, err := r.NewImageFromLocal(iid) - if err != nil { - return nil, err - } - return newImage, nil -} - -func (r *LocalRuntime) ImageTree(imageOrID string, whatRequires bool) (string, error) { - return iopodman.ImageTree().Call(r.Conn, imageOrID, whatRequires) -} - -// IsParent goes through the layers in the store and checks if i.TopLayer is -// the parent of any other layer in store. Double check that image with that -// layer exists as well. -func (ci *ContainerImage) IsParent(context.Context) (bool, error) { - return ci.remoteImage.isParent, nil -} - -// ID returns the image ID as a string -func (ci *ContainerImage) ID() string { - return ci.remoteImage.ID -} - -// Names returns a string array of names associated with the image -func (ci *ContainerImage) Names() []string { - return ci.remoteImage.Names -} - -// NamesHistory returns a string array of names previously associated with the image -func (ci *ContainerImage) NamesHistory() []string { - return ci.remoteImage.NamesHistory -} - -// Created returns the time the image was created -func (ci *ContainerImage) Created() time.Time { - return ci.remoteImage.Created -} - -// IsReadOnly returns whether the image is ReadOnly -func (ci *ContainerImage) IsReadOnly() bool { - return ci.remoteImage.ReadOnly -} - -// Size returns the size of the image -func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) { - usize := uint64(ci.remoteImage.Size) - return &usize, nil -} - -// Digest returns the image's digest -func (ci *ContainerImage) Digest() digest.Digest { - return ci.remoteImage.Digest -} - -// Digests returns the image's digests -func (ci *ContainerImage) Digests() []digest.Digest { - return append([]digest.Digest{}, ci.remoteImage.Digests...) -} - -// Labels returns a map of the image's labels -func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) { - return ci.remoteImage.Labels, nil -} - -// Dangling returns a bool if the image is "dangling" -func (ci *ContainerImage) Dangling() bool { - return len(ci.Names()) == 0 -} - -// TopLayer returns an images top layer as a string -func (ci *ContainerImage) TopLayer() string { - return ci.remoteImage.TopLayer -} - -// TagImage ... 
-func (ci *ContainerImage) TagImage(tag string) error { - _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag) - return err -} - -// UntagImage removes a single tag from an image -func (ci *ContainerImage) UntagImage(tag string) error { - _, err := iopodman.UntagImage().Call(ci.Runtime.Conn, ci.ID(), tag) - return err -} - -// RemoveImage calls varlink to remove an image -func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (*image.ImageDeleteResponse, error) { - ir := image.ImageDeleteResponse{} - response, err := iopodman.RemoveImageWithResponse().Call(r.Conn, img.InputName, force) - if err != nil { - return nil, err - } - ir.Deleted = response.Deleted - ir.Untagged = append(ir.Untagged, response.Untagged...) - return &ir, nil -} - -// History returns the history of an image and its layers -func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) { - var imageHistories []*image.History - - reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName) - if err != nil { - return nil, err - } - for _, h := range reply { - created, err := time.ParseInLocation(time.RFC3339, h.Created, time.UTC) - if err != nil { - return nil, err - } - ih := image.History{ - ID: h.Id, - Created: &created, - CreatedBy: h.CreatedBy, - Size: h.Size, - Comment: h.Comment, - } - imageHistories = append(imageHistories, &ih) - } - return imageHistories, nil -} - -// PruneImages is the wrapper call for a remote-client to prune images -func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) { - return iopodman.ImagesPrune().Call(r.Conn, all, filter) -} - -// Export is a wrapper to container export to a tarfile -func (r *LocalRuntime) Export(name string, path string) error { - tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "") - if err != nil { - return err - } - return r.GetFileFromRemoteHost(tempPath, path, true) -} - -func (r *LocalRuntime) GetFileFromRemoteHost(remoteFilePath, outputPath string, delete bool) error { - outputFile, err := os.Create(outputPath) - if err != nil { - return err - } - defer outputFile.Close() - - writer := bufio.NewWriter(outputFile) - defer writer.Flush() - - reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, remoteFilePath, delete) - if err != nil { - return err - } - - length, _, err := reply() - if err != nil { - return errors.Wrap(err, "unable to get file length for transfer") - } - - reader := r.Conn.Reader - if _, err := io.CopyN(writer, reader, length); err != nil { - return errors.Wrap(err, "file transfer failed") - } - return nil -} - -// Import implements the remote calls required to import a container image to the store -func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) { - // First we send the file to the host - tempFile, err := r.SendFileOverVarlink(source) - if err != nil { - return "", err - } - return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true) -} - -func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) (string, reference.Canonical, error) { - buildOptions := iopodman.BuildOptions{ - AddHosts: options.CommonBuildOpts.AddHost, - CgroupParent: options.CommonBuildOpts.CgroupParent, - CpuPeriod: int64(options.CommonBuildOpts.CPUPeriod), - CpuQuota: options.CommonBuildOpts.CPUQuota, - 
CpuShares: int64(options.CommonBuildOpts.CPUShares), - CpusetCpus: options.CommonBuildOpts.CPUSetMems, - CpusetMems: options.CommonBuildOpts.CPUSetMems, - Memory: options.CommonBuildOpts.Memory, - MemorySwap: options.CommonBuildOpts.MemorySwap, - ShmSize: options.CommonBuildOpts.ShmSize, - Ulimit: options.CommonBuildOpts.Ulimit, - Volume: options.CommonBuildOpts.Volumes, - } - buildinfo := iopodman.BuildInfo{ - // Err: string(options.Err), - // Out: - // ReportWriter: - Architecture: options.Architecture, - AddCapabilities: options.AddCapabilities, - AdditionalTags: options.AdditionalTags, - Annotations: options.Annotations, - BuildArgs: options.Args, - BuildOptions: buildOptions, - CniConfigDir: options.CNIConfigDir, - CniPluginDir: options.CNIPluginPath, - Compression: string(options.Compression), - Devices: options.Devices, - DefaultsMountFilePath: options.DefaultMountsFilePath, - Dockerfiles: dockerfiles, - DropCapabilities: options.DropCapabilities, - ForceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, - Iidfile: options.IIDFile, - Label: options.Labels, - Layers: options.Layers, - // NamespaceOptions: options.NamespaceOptions, - Nocache: options.NoCache, - Os: options.OS, - Output: options.Output, - OutputFormat: options.OutputFormat, - PullPolicy: options.PullPolicy.String(), - Quiet: options.Quiet, - RemoteIntermediateCtrs: options.RemoveIntermediateCtrs, - RuntimeArgs: options.RuntimeArgs, - SignBy: options.SignBy, - Squash: options.Squash, - Target: options.Target, - TransientMounts: options.TransientMounts, - } - // tar the file - outputFile, err := ioutil.TempFile("", "varlink_tar_send") - if err != nil { - return "", nil, err - } - defer outputFile.Close() - defer os.Remove(outputFile.Name()) - - // Create the tarball of the context dir to a tempfile - if err := utils.TarToFilesystem(options.ContextDirectory, outputFile); err != nil { - return "", nil, err - } - // Send the context dir tarball over varlink. 
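Aside: before streaming the build context over varlink, the removed Build implementation writes the context directory into a temporary tarball. The standard-library sketch below stands in for that step under simplifying assumptions (regular files only, no compression); it is not the utils.TarToFilesystem helper itself.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
)

// tarDirToTempFile archives contextDir into a temp file and returns its path.
func tarDirToTempFile(contextDir string) (string, error) {
	out, err := os.CreateTemp("", "build-context-*.tar")
	if err != nil {
		return "", err
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	defer tw.Close()

	err = filepath.WalkDir(contextDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			// Skip symlinks, sockets, etc. in this simplified sketch.
			return nil
		}
		rel, err := filepath.Rel(contextDir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
	if err != nil {
		return "", err
	}
	return out.Name(), nil
}

func main() {
	// Tar the current directory as a stand-in for a build context.
	path, err := tarDirToTempFile(".")
	fmt.Println(path, err)
}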
- tempFile, err := r.SendFileOverVarlink(outputFile.Name()) - if err != nil { - return "", nil, err - } - buildinfo.ContextDir = tempFile - - reply, err := iopodman.BuildImage().Send(r.Conn, varlink.More, buildinfo) - if err != nil { - return "", nil, err - } - - for { - responses, flags, err := reply() - if err != nil { - return "", nil, err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - } - return "", nil, err -} - -// SendFileOverVarlink sends a file over varlink in an upgraded connection -func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) { - fs, err := os.Open(source) - if err != nil { - return "", err - } - - fileInfo, err := fs.Stat() - if err != nil { - return "", err - } - logrus.Debugf("sending %s over varlink connection", source) - reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", fileInfo.Size()) - if err != nil { - return "", err - } - _, _, err = reply() - if err != nil { - return "", err - } - - reader := bufio.NewReader(fs) - _, err = reader.WriteTo(r.Conn.Writer) - if err != nil { - return "", err - } - logrus.Debugf("file transfer complete for %s", source) - r.Conn.Writer.Flush() - - // All was sent, wait for the ACK from the server - tempFile, err := r.Conn.Reader.ReadString(':') - if err != nil { - return "", err - } - - // r.Conn is kaput at this point due to the upgrade - if err := r.RemoteRuntime.RefreshConnection(); err != nil { - return "", err - - } - - return strings.Replace(tempFile, ":", "", -1), nil -} - -// GetAllVolumes retrieves all the volumes -func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) { - return nil, define.ErrNotImplemented -} - -// RemoveVolume removes a volumes -func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error { - return define.ErrNotImplemented -} - -// GetContainers retrieves all containers from the state -// Filters can be provided which will determine what containers are included in -// the output. 
Multiple filters are handled by ANDing their output, so only -// containers matching all filters are returned -func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) { - return nil, define.ErrNotImplemented -} - -// RemoveContainer removes the given container -// If force is specified, the container will be stopped first -// Otherwise, RemoveContainer will return an error if the container is running -func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error { - return define.ErrNotImplemented -} - -// CreateVolume creates a volume over a varlink connection for the remote client -func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) { - cvOpts := iopodman.VolumeCreateOpts{ - Options: opts, - Labels: labels, - } - if len(c.InputArgs) > 0 { - cvOpts.VolumeName = c.InputArgs[0] - } - - if c.Flag("driver").Changed { - cvOpts.Driver = c.Driver - } - - return iopodman.VolumeCreate().Call(r.Conn, cvOpts) -} - -// RemoveVolumes removes volumes over a varlink connection for the remote client -func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, map[string]error, error) { - rmOpts := iopodman.VolumeRemoveOpts{ - All: c.All, - Force: c.Force, - Volumes: c.InputArgs, - } - success, failures, err := iopodman.VolumeRemove().Call(r.Conn, rmOpts) - stringsToErrors := make(map[string]error) - for k, v := range failures { - stringsToErrors[k] = errors.New(v) - } - return success, stringsToErrors, err -} - -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { - - reply, err := iopodman.PushImage().Send(r.Conn, varlink.More, srcName, destination, forceCompress, manifestMIMEType, signingOptions.RemoveSignatures, signingOptions.SignBy) - if err != nil { - return err - } - for { - responses, flags, err := reply() - if err != nil { - return err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - } - - return err -} - -// InspectVolumes returns a slice of volumes based on an arg list or --all -func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*libpod.InspectVolumeData, error) { - var ( - inspectData []*libpod.InspectVolumeData - volumes []string - ) - - if c.All { - allVolumes, err := r.Volumes(ctx) - if err != nil { - return nil, err - } - for _, vol := range allVolumes { - volumes = append(volumes, vol.Name()) - } - } else { - volumes = append(volumes, c.InputArgs...) 
- } - - for _, vol := range volumes { - jsonString, err := iopodman.InspectVolume().Call(r.Conn, vol) - if err != nil { - return nil, err - } - inspectJSON := new(libpod.InspectVolumeData) - if err := json.Unmarshal([]byte(jsonString), inspectJSON); err != nil { - return nil, errors.Wrapf(err, "error unmarshalling inspect JSON for volume %s", vol) - } - inspectData = append(inspectData, inspectJSON) - } - - return inspectData, nil -} - -// Volumes returns a slice of adapter.volumes based on information about libpod -// volumes over a varlink connection -func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) { - reply, err := iopodman.GetVolumes().Call(r.Conn, []string{}, true) - if err != nil { - return nil, err - } - return varlinkVolumeToVolume(r, reply), nil -} - -func varlinkVolumeToVolume(r *LocalRuntime, volumes []iopodman.Volume) []*Volume { - var vols []*Volume - for _, v := range volumes { - volumeConfig := libpod.VolumeConfig{ - Name: v.Name, - Labels: v.Labels, - MountPoint: v.MountPoint, - Driver: v.Driver, - Options: v.Options, - } - n := remoteVolume{ - Runtime: r, - config: &volumeConfig, - } - newVol := Volume{ - n, - } - vols = append(vols, &newVol) - } - return vols -} - -// PruneVolumes removes all unused volumes from the remote system -func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) { - var errs []error - prunedNames, prunedErrors, err := iopodman.VolumesPrune().Call(r.Conn) - if err != nil { - return []string{}, []error{err} - } - // We need to transform the string results of the error into actual error types - for _, e := range prunedErrors { - errs = append(errs, errors.New(e)) - } - return prunedNames, errs -} - -// SaveImage is a wrapper function for saving an image to the local filesystem -func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error { - source := c.InputArgs[0] - additionalTags := c.InputArgs[1:] - - options := iopodman.ImageSaveOptions{ - Name: source, - Format: c.Format, - Output: c.Output, - MoreTags: additionalTags, - Quiet: c.Quiet, - Compress: c.Compress, - } - reply, err := iopodman.ImageSave().Send(r.Conn, varlink.More, options) - if err != nil { - return err - } - - var fetchfile string - for { - responses, flags, err := reply() - if err != nil { - return err - } - if len(responses.Id) > 0 { - fetchfile = responses.Id - } - for _, line := range responses.Logs { - fmt.Print(line) - } - if flags&varlink.Continues == 0 { - break - } - - } - if err != nil { // nolint: govet - return err - } - - outputToDir := false - outfile := c.Output - var outputFile *os.File - // If the result is supposed to be a dir, then we need to put the tarfile - // from the host in a temporary file - if options.Format != "oci-archive" && options.Format != "docker-archive" { - outputToDir = true - outputFile, err = ioutil.TempFile("", "saveimage_tempfile") - if err != nil { - return err - } - outfile = outputFile.Name() - defer outputFile.Close() - defer os.Remove(outputFile.Name()) - } - // We now need to fetch the tarball result back to the more system - if err := r.GetFileFromRemoteHost(fetchfile, outfile, true); err != nil { - return err - } - - // If the result is a tarball, we're done - // If it is a dir, we need to untar the temporary file into the dir - if outputToDir { - if err := utils.UntarToFileSystem(c.Output, outputFile, &archive.TarOptions{}); err != nil { - return err - } - } - return nil -} - -// LoadImage loads a container image from a remote client's filesystem -func (r 
*LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { - var names string - remoteTempFile, err := r.SendFileOverVarlink(cli.Input) - if err != nil { - return "", nil - } - more := varlink.More - if cli.Quiet { - more = 0 - } - reply, err := iopodman.LoadImage().Send(r.Conn, uint64(more), name, remoteTempFile, cli.Quiet, true) - if err != nil { - return "", err - } - - for { - responses, flags, err := reply() - if err != nil { - logrus.Error(err) - return "", err - } - for _, line := range responses.Logs { - fmt.Print(line) - } - names = responses.Id - if flags&varlink.Continues == 0 { - break - } - } - return names, nil -} - -// IsImageNotFound checks if the error indicates that no image was found. -func IsImageNotFound(err error) bool { - if errors.Cause(err) == image.ErrNoSuchImage { - return true - } - switch err.(type) { // nolint: gocritic - case *iopodman.ImageNotFound: - return true - } - return false -} - -// HealthCheck executes a container's healthcheck over a varlink connection -func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) { - return iopodman.HealthCheckRun().Call(r.Conn, c.InputArgs[0]) -} - -// Events monitors libpod/podman events over a varlink connection -func (r *LocalRuntime) Events(c *cliconfig.EventValues) error { - var more uint64 - if c.Stream { - more = uint64(varlink.More) - } - reply, err := iopodman.GetEvents().Send(r.Conn, more, c.Filter, c.Since, c.Until) - if err != nil { - return errors.Wrapf(err, "unable to obtain events") - } - - w := bufio.NewWriter(os.Stdout) - var tmpl *template.Template - if c.Format != formats.JSONString { - template, err := template.New("events").Parse(c.Format) - if err != nil { - return err - } - tmpl = template - } - - for { - returnedEvent, flags, err := reply() - if err != nil { - // When the error handling is back into podman, we can flip this to a better way to check - // for problems. For now, this works. - return err - } - if returnedEvent.Time == "" && returnedEvent.Status == "" && returnedEvent.Type == "" { - // We got a blank event return, signals end of stream in certain cases - break - } - eTime, err := time.Parse(time.RFC3339Nano, returnedEvent.Time) - if err != nil { - return errors.Wrapf(err, "unable to parse time of event %s", returnedEvent.Time) - } - eType, err := events.StringToType(returnedEvent.Type) - if err != nil { - return err - } - eStatus, err := events.StringToStatus(returnedEvent.Status) - if err != nil { - return err - } - event := events.Event{ - ID: returnedEvent.Id, - Image: returnedEvent.Image, - Name: returnedEvent.Name, - Status: eStatus, - Time: eTime, - Type: eType, - } - if c.Format == formats.JSONString { // nolint: gocritic - jsonStr, err := event.ToJSONString() - if err != nil { - return errors.Wrapf(err, "unable to format json") - } - if _, err := w.Write([]byte(jsonStr)); err != nil { - return err - } - } else if len(c.Format) > 0 { - if err := tmpl.Execute(w, event); err != nil { - return err - } - } else { - if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil { - return err - } - } - - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - if err := w.Flush(); err != nil { - return err - } - if flags&varlink.Continues == 0 { - break - } - } - return nil -} - -// Diff ... 
-func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) { - var changes []archive.Change - reply, err := iopodman.Diff().Call(r.Conn, to) - if err != nil { - return nil, err - } - for _, change := range reply { - changes = append(changes, archive.Change{Path: change.Path, Kind: stringToChangeType(change.ChangeType)}) - } - return changes, nil -} - -func stringToChangeType(change string) archive.ChangeType { - switch change { - case "A": - return archive.ChangeAdd - case "D": - return archive.ChangeDelete - default: // nolint: gocritic,stylecheck - logrus.Errorf("'%s' is unknown archive type", change) - fallthrough - case "C": - return archive.ChangeModify - } -} - -// GenerateKube creates kubernetes email from containers and pods -func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) { - var ( - pod v1.Pod - service v1.Service - ) - reply, err := iopodman.GenerateKube().Call(r.Conn, c.InputArgs[0], c.Service) - if err != nil { - return nil, nil, errors.Wrap(err, "unable to create kubernetes YAML") - } - if err := json.Unmarshal([]byte(reply.Pod), &pod); err != nil { - return nil, nil, err - } - err = json.Unmarshal([]byte(reply.Service), &service) - return &pod, &service, err -} - -// GetContainersByContext looks up containers based on the cli input of all, latest, or a list -func (r *LocalRuntime) GetContainersByContext(all bool, latest bool, namesOrIDs []string) ([]*Container, error) { - var containers []*Container - cids, err := iopodman.GetContainersByContext().Call(r.Conn, all, latest, namesOrIDs) - if err != nil { - return nil, err - } - for _, cid := range cids { - ctr, err := r.LookupContainer(cid) - if err != nil { - return nil, err - } - containers = append(containers, ctr) - } - return containers, nil -} - -// GetVersion returns version information from service -func (r *LocalRuntime) GetVersion() (define.Version, error) { - version, goVersion, gitCommit, built, osArch, apiVersion, err := iopodman.GetVersion().Call(r.Conn) - if err != nil { - return define.Version{}, errors.Wrapf(err, "Unable to obtain server version information") - } - - var buildTime int64 - if built != "" { - t, err := time.Parse(time.RFC3339, built) - if err != nil { - return define.Version{}, nil - } - buildTime = t.Unix() - } - - return define.Version{ - RemoteAPIVersion: apiVersion, - Version: version, - GoVersion: goVersion, - GitCommit: gitCommit, - Built: buildTime, - OsArch: osArch, - }, nil -} diff --git a/pkg/adapter/runtime_remote_supported.go b/pkg/adapter/runtime_remote_supported.go deleted file mode 100644 index b8e8da308..000000000 --- a/pkg/adapter/runtime_remote_supported.go +++ /dev/null @@ -1 +0,0 @@ -package adapter diff --git a/pkg/adapter/sigproxy_linux.go b/pkg/adapter/sigproxy_linux.go deleted file mode 100644 index 5695d0e42..000000000 --- a/pkg/adapter/sigproxy_linux.go +++ /dev/null @@ -1,45 +0,0 @@ -package adapter - -import ( - "os" - "syscall" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/pkg/signal" - "github.com/sirupsen/logrus" -) - -// ProxySignals ... -func ProxySignals(ctr *libpod.Container) { - sigBuffer := make(chan os.Signal, 128) - signal.CatchAll(sigBuffer) - - logrus.Debugf("Enabling signal proxying") - - go func() { - for s := range sigBuffer { - // Ignore SIGCHLD and SIGPIPE - these are mostly likely - // intended for the podman command itself. - // SIGURG was added because of golang 1.14 and its preemptive changes - // causing more signals to "show up". 
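The SIGURG comment above is the important detail here: since Go 1.14 the runtime delivers SIGURG to its own process for asynchronous preemption, so a proxy that forwarded every caught signal would flood the container. A minimal stand-alone sketch of the same filtering idea, using plain os/signal instead of podman's signal package (the print stands in for the ctr.Kill call above):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 128)
	// With no signals listed, Notify relays every catchable signal.
	signal.Notify(sigs)

	for s := range sigs {
		// SIGCHLD and SIGPIPE are almost certainly meant for this process
		// itself, and SIGURG is runtime noise from Go 1.14's preemption,
		// so none of them should be forwarded.
		if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG {
			continue
		}
		fmt.Printf("would forward signal %v to the container\n", s)
	}
}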
- // https://github.com/containers/libpod/issues/5483 - if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG { - continue - } - - if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil { - // If the container dies, and we find out here, - // we need to forward that one signal to - // ourselves so that it is not lost, and then - // we terminate the proxy and let the defaults - // play out. - logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err) - signal.StopCatch(sigBuffer) - if err := syscall.Kill(syscall.Getpid(), s.(syscall.Signal)); err != nil { - logrus.Errorf("failed to kill pid %d", syscall.Getpid()) - } - return - } - } - }() -} diff --git a/pkg/adapter/terminal.go b/pkg/adapter/terminal.go deleted file mode 100644 index 499e77def..000000000 --- a/pkg/adapter/terminal.go +++ /dev/null @@ -1,101 +0,0 @@ -package adapter - -import ( - "context" - "os" - "os/signal" - - lsignal "github.com/containers/libpod/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "k8s.io/client-go/tools/remotecommand" -) - -// RawTtyFormatter ... -type RawTtyFormatter struct { -} - -// getResize returns a TerminalSize command matching stdin's current -// size on success, and nil on errors. -func getResize() *remotecommand.TerminalSize { - winsize, err := term.GetWinsize(os.Stdin.Fd()) - if err != nil { - logrus.Warnf("Could not get terminal size %v", err) - return nil - } - return &remotecommand.TerminalSize{ - Width: winsize.Width, - Height: winsize.Height, - } -} - -// Helper for prepareAttach - set up a goroutine to generate terminal resize events -func resizeTty(ctx context.Context, resize chan remotecommand.TerminalSize) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, lsignal.SIGWINCH) - go func() { - defer close(resize) - // Update the terminal size immediately without waiting - // for a SIGWINCH to get the correct initial size. - resizeEvent := getResize() - for { - if resizeEvent == nil { - select { - case <-ctx.Done(): - return - case <-sigchan: - resizeEvent = getResize() - } - } else { - select { - case <-ctx.Done(): - return - case <-sigchan: - resizeEvent = getResize() - case resize <- *resizeEvent: - resizeEvent = nil - } - } - } - }() -} - -func restoreTerminal(state *term.State) error { - logrus.SetFormatter(&logrus.TextFormatter{}) - return term.RestoreTerminal(os.Stdin.Fd(), state) -} - -// Format ... 
-func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) { - textFormatter := logrus.TextFormatter{} - bytes, err := textFormatter.Format(entry) - - if err == nil { - bytes = append(bytes, '\r') - } - - return bytes, err -} - -func handleTerminalAttach(ctx context.Context, resize chan remotecommand.TerminalSize) (context.CancelFunc, *term.State, error) { - logrus.Debugf("Handling terminal attach") - - subCtx, cancel := context.WithCancel(ctx) - - resizeTty(subCtx, resize) - - oldTermState, err := term.SaveState(os.Stdin.Fd()) - if err != nil { - // allow caller to not have to do any cleaning up if we error here - cancel() - return nil, nil, errors.Wrapf(err, "unable to save terminal state") - } - - logrus.SetFormatter(&RawTtyFormatter{}) - if _, err := term.SetRawTerminal(os.Stdin.Fd()); err != nil { - return cancel, nil, err - } - - return cancel, oldTermState, nil -} diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go deleted file mode 100644 index a56704be6..000000000 --- a/pkg/adapter/terminal_linux.go +++ /dev/null @@ -1,121 +0,0 @@ -package adapter - -import ( - "bufio" - "context" - "fmt" - "os" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" - "k8s.io/client-go/tools/remotecommand" -) - -// ExecAttachCtr execs and attaches to a container -func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { - resize := make(chan remotecommand.TerminalSize) - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && tty { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return -1, err - } - defer cancel() - defer func() { - if err := restoreTerminal(oldTermState); err != nil { - logrus.Errorf("unable to restore terminal: %q", err) - } - }() - } - - execConfig := new(libpod.ExecConfig) - execConfig.Command = cmd - execConfig.Terminal = tty - execConfig.Privileged = privileged - execConfig.Environment = env - execConfig.User = user - execConfig.WorkDir = workDir - execConfig.DetachKeys = &detachKeys - execConfig.PreserveFDs = preserveFDs - - return ctr.Exec(execConfig, streams, resize) -} - -// StartAttachCtr starts and (if required) attaches to a container -// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream -// error. we may need to just lint disable this one. -func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer - resize := make(chan remotecommand.TerminalSize) - - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. 
If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && ctr.Spec().Process.Terminal { - cancel, oldTermState, err := handleTerminalAttach(ctx, resize) - if err != nil { - return err - } - defer func() { - if err := restoreTerminal(oldTermState); err != nil { - logrus.Errorf("unable to restore terminal: %q", err) - } - }() - defer cancel() - } - - streams := new(define.AttachStreams) - streams.OutputStream = stdout - streams.ErrorStream = stderr - streams.InputStream = bufio.NewReader(stdin) - streams.AttachOutput = true - streams.AttachError = true - streams.AttachInput = true - - if stdout == nil { - logrus.Debugf("Not attaching to stdout") - streams.AttachOutput = false - } - if stderr == nil { - logrus.Debugf("Not attaching to stderr") - streams.AttachError = false - } - if stdin == nil { - logrus.Debugf("Not attaching to stdin") - streams.AttachInput = false - } - - if !startContainer { - if sigProxy { - ProxySignals(ctr) - } - - return ctr.Attach(streams, detachKeys, resize) - } - - attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive) - if err != nil { - return err - } - - if sigProxy { - ProxySignals(ctr) - } - - if stdout == nil && stderr == nil { - fmt.Printf("%s\n", ctr.ID()) - } - - err = <-attachChan - if err != nil { - return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) - } - - return nil -} diff --git a/pkg/adapter/terminal_unsupported.go b/pkg/adapter/terminal_unsupported.go deleted file mode 100644 index 9067757a1..000000000 --- a/pkg/adapter/terminal_unsupported.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !linux - -package adapter - -import ( - "context" - "os" - - "github.com/containers/libpod/libpod" - "github.com/containers/libpod/libpod/define" -) - -// ExecAttachCtr execs and attaches to a container -func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { - return -1, define.ErrNotImplemented -} - -// StartAttachCtr starts and (if required) attaches to a container -// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream -// error. we may need to just lint disable this one. 
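Both attach paths above follow the same terminal dance: check that stdin is a TTY, save its state, switch it to raw mode, and restore it on the way out. A small self-contained sketch of that pattern using golang.org/x/term (an assumption for illustration; the deleted code uses the docker/docker/pkg/term helpers instead):

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !term.IsTerminal(fd) {
		fmt.Println("stdin is not a terminal; nothing to do")
		return
	}
	// Save the current state and switch to raw mode, mirroring the
	// SaveState/SetRawTerminal pair in the deleted code.
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Always restore the terminal, even if the attach logic fails.
	defer term.Restore(fd, oldState)

	if w, h, err := term.GetSize(fd); err == nil {
		// In raw mode a bare \n no longer implies a carriage return.
		fmt.Printf("terminal size: %dx%d\r\n", w, h)
	}
}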
-func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer - return define.ErrNotImplemented -} diff --git a/pkg/adapter/volumes_remote.go b/pkg/adapter/volumes_remote.go deleted file mode 100644 index 58f9ba625..000000000 --- a/pkg/adapter/volumes_remote.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build remoteclient - -package adapter - -// Name returns the name of the volume -func (v *Volume) Name() string { - return v.config.Name -} - -//Labels returns the labels for a volume -func (v *Volume) Labels() map[string]string { - return v.config.Labels -} - -// Driver returns the driver for the volume -func (v *Volume) Driver() string { - return v.config.Driver -} - -// Options returns the options a volume was created with -func (v *Volume) Options() map[string]string { - return v.config.Options -} - -// MountPath returns the path the volume is mounted to -func (v *Volume) MountPoint() string { - return v.config.MountPoint -} - -// Scope returns the scope for an adapter.volume -func (v *Volume) Scope() string { - return "local" -} diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go index bc247c4ae..a46b308b5 100644 --- a/pkg/api/handlers/utils/containers.go +++ b/pkg/api/handlers/utils/containers.go @@ -5,7 +5,6 @@ import ( "net/http" "time" - "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/domain/entities" @@ -63,7 +62,7 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) (int32, error) { func CreateContainer(ctx context.Context, w http.ResponseWriter, runtime *libpod.Runtime, cc *createconfig.CreateConfig) { var pod *libpod.Pod - ctr, err := shared.CreateContainerFromCreateConfig(runtime, cc, ctx, pod) + ctr, err := createconfig.CreateContainerFromCreateConfig(runtime, cc, ctx, pod) if err != nil { Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()")) return diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go index 45c5425a3..535fba858 100644 --- a/pkg/domain/infra/runtime_image_proxy.go +++ b/pkg/domain/infra/runtime_image_proxy.go @@ -10,7 +10,7 @@ import ( "github.com/spf13/pflag" ) -// ContainerEngine Image Proxy will be EOL'ed after podmanV2 is separated from libpod repo +// ContainerEngine Image Proxy will be EOL'ed after podman is separated from libpod repo func NewLibpodImageRuntime(flags *pflag.FlagSet, opts entities.PodmanConfig) (entities.ImageEngine, error) { r, err := GetRuntime(context.Background(), flags, opts) diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go index 18f716ea0..fbe45ea8f 100644 --- a/pkg/domain/infra/runtime_proxy.go +++ b/pkg/domain/infra/runtime_proxy.go @@ -10,7 +10,7 @@ import ( flag "github.com/spf13/pflag" ) -// ContainerEngine Proxy will be EOL'ed after podmanV2 is separated from libpod repo +// ContainerEngine Proxy will be EOL'ed after podman is separated from libpod repo func NewLibpodRuntime(flags *flag.FlagSet, opts entities.PodmanConfig) (entities.ContainerEngine, error) { r, err := GetRuntime(context.Background(), flags, opts) diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go index daa997104..2cf30a59e 100644 --- a/pkg/spec/createconfig.go +++ b/pkg/spec/createconfig.go @@ -1,6 +1,7 @@ package 
createconfig import ( + "context" "os" "strconv" "strings" @@ -397,3 +398,20 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l func AddPrivilegedDevices(g *generate.Generator) error { return addPrivilegedDevices(g) } + +func CreateContainerFromCreateConfig(r *libpod.Runtime, createConfig *CreateConfig, ctx context.Context, pod *libpod.Pod) (*libpod.Container, error) { + runtimeSpec, options, err := createConfig.MakeContainerConfig(r, pod) + if err != nil { + return nil, err + } + + // Set the CreateCommand explicitly. Some (future) consumers of libpod + // might not want to set it. + options = append(options, libpod.WithCreateCommand()) + + ctr, err := r.NewContainer(ctx, runtimeSpec, options...) + if err != nil { + return nil, err + } + return ctr, nil +} diff --git a/pkg/varlinkapi/container.go b/pkg/varlinkapi/container.go new file mode 100644 index 000000000..eae54dfeb --- /dev/null +++ b/pkg/varlinkapi/container.go @@ -0,0 +1,928 @@ +package varlinkapi + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/v5/types" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/timetype" + "github.com/containers/libpod/pkg/util" + "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/docker/go-units" + "github.com/google/shlex" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" +) + +const ( + cidTruncLength = 12 + podTruncLength = 12 + iidTruncLength = 12 + cmdTruncLength = 17 +) + +// PsOptions describes the struct being formed for ps. +type PsOptions struct { + All bool + Format string + Last int + Latest bool + NoTrunc bool + Pod bool + Quiet bool + Size bool + Sort string + Namespace bool + Sync bool +} + +// BatchContainerStruct is the return object from BatchContainer and contains +// container related information. +type BatchContainerStruct struct { + ConConfig *libpod.ContainerConfig + ConState define.ContainerStatus + ExitCode int32 + Exited bool + Pid int + StartedTime time.Time + ExitedTime time.Time + Size *ContainerSize +} + +// PsContainerOutput is the struct being returned from a parallel +// batch operation. +type PsContainerOutput struct { + ID string + Image string + ImageID string + Command string + Created string + Ports string + Names string + IsInfra bool + Status string + State define.ContainerStatus + Pid int + Size *ContainerSize + Pod string + PodName string + CreatedAt time.Time + ExitedAt time.Time + StartedAt time.Time + Labels map[string]string + PID string + Cgroup string + IPC string + MNT string + NET string + PIDNS string + User string + UTS string + Mounts string +} + +// Namespace describes output for ps namespace. +type Namespace struct { + PID string `json:"pid,omitempty"` + Cgroup string `json:"cgroup,omitempty"` + IPC string `json:"ipc,omitempty"` + MNT string `json:"mnt,omitempty"` + NET string `json:"net,omitempty"` + PIDNS string `json:"pidns,omitempty"` + User string `json:"user,omitempty"` + UTS string `json:"uts,omitempty"` +} + +// ContainerSize holds the size of the container's root filesystem and top +// read-write layer. +type ContainerSize struct { + RootFsSize int64 `json:"rootFsSize"` + RwSize int64 `json:"rwSize"` +} + +// NewBatchContainer runs a batch process under one lock to get container information and only +// be called in PBatch. 
+func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) { + var ( + conState define.ContainerStatus + command string + created string + status string + exitedAt time.Time + startedAt time.Time + exitCode int32 + err error + pid int + size *ContainerSize + ns *Namespace + pso PsContainerOutput + ) + batchErr := ctr.Batch(func(c *libpod.Container) error { + if opts.Sync { + if err := c.Sync(); err != nil { + return err + } + } + + conState, err = c.State() + if err != nil { + return errors.Wrapf(err, "unable to obtain container state") + } + command = strings.Join(c.Command(), " ") + created = units.HumanDuration(time.Since(c.CreatedTime())) + " ago" + + exitCode, _, err = c.ExitCode() + if err != nil { + return errors.Wrapf(err, "unable to obtain container exit code") + } + startedAt, err = c.StartedTime() + if err != nil { + logrus.Errorf("error getting started time for %q: %v", c.ID(), err) + } + exitedAt, err = c.FinishedTime() + if err != nil { + logrus.Errorf("error getting exited time for %q: %v", c.ID(), err) + } + if opts.Namespace { + pid, err = c.PID() + if err != nil { + return errors.Wrapf(err, "unable to obtain container pid") + } + ns = GetNamespaces(pid) + } + if opts.Size { + size = new(ContainerSize) + + rootFsSize, err := c.RootFsSize() + if err != nil { + logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err) + } + + rwSize, err := c.RWSize() + if err != nil { + logrus.Errorf("error getting rw size for %q: %v", c.ID(), err) + } + + size.RootFsSize = rootFsSize + size.RwSize = rwSize + } + + return nil + }) + + if batchErr != nil { + return pso, batchErr + } + + switch conState.String() { + case define.ContainerStateExited.String(): + fallthrough + case define.ContainerStateStopped.String(): + exitedSince := units.HumanDuration(time.Since(exitedAt)) + status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince) + case define.ContainerStateRunning.String(): + status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago" + case define.ContainerStatePaused.String(): + status = "Paused" + case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String(): + status = "Created" + case define.ContainerStateRemoving.String(): + status = "Removing" + default: + status = "Error" + } + + imageID, imageName := ctr.Image() + cid := ctr.ID() + podID := ctr.PodID() + if !opts.NoTrunc { + cid = cid[0:cidTruncLength] + if len(podID) > podTruncLength { + podID = podID[0:podTruncLength] + } + if len(command) > cmdTruncLength { + command = command[0:cmdTruncLength] + "..." + } + if len(imageID) > iidTruncLength { + imageID = imageID[0:iidTruncLength] + } + } + + ports, err := ctr.PortMappings() + if err != nil { + logrus.Errorf("unable to lookup namespace container for %s", ctr.ID()) + } + + pso.ID = cid + pso.Image = imageName + pso.ImageID = imageID + pso.Command = command + pso.Created = created + pso.Ports = portsToString(ports) + pso.Names = ctr.Name() + pso.IsInfra = ctr.IsInfra() + pso.Status = status + pso.State = conState + pso.Pid = pid + pso.Size = size + pso.ExitedAt = exitedAt + pso.CreatedAt = ctr.CreatedTime() + pso.StartedAt = startedAt + pso.Labels = ctr.Labels() + pso.Mounts = strings.Join(ctr.UserVolumes(), " ") + + // Add pod name and pod ID if requested by user. + // No need to look up the pod if its ID is empty. + if opts.Pod && len(podID) > 0 { + // The pod name is not in the container definition + // so we need to retrieve it using the pod ID. 
+ var podName string + pod, err := r.LookupPod(podID) + if err != nil { + logrus.Errorf("unable to lookup pod for container %s", ctr.ID()) + } else { + podName = pod.Name() + } + + pso.Pod = podID + pso.PodName = podName + } + + if opts.Namespace { + pso.Cgroup = ns.Cgroup + pso.IPC = ns.IPC + pso.MNT = ns.MNT + pso.NET = ns.NET + pso.User = ns.User + pso.UTS = ns.UTS + pso.PIDNS = ns.PIDNS + } + + return pso, nil +} + +type batchFunc func() (PsContainerOutput, error) + +type workerInput struct { + parallelFunc batchFunc + opts PsOptions + cid string + job int +} + +// worker is a "threaded" worker that takes jobs from the channel "queue". +func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) { + for j := range jobs { + r, err := j.parallelFunc() + // If we find an error, we return just the error. + if err != nil { + errors <- err + } else { + // Return the result. + results <- r + } + wg.Done() + } +} + +// GenerateContainerFilterFuncs return ContainerFilter functions based of filter. +func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) { + switch filter { + case "id": + return func(c *libpod.Container) bool { + return strings.Contains(c.ID(), filterValue) + }, nil + case "label": + var filterArray = strings.SplitN(filterValue, "=", 2) + var filterKey = filterArray[0] + if len(filterArray) > 1 { + filterValue = filterArray[1] + } else { + filterValue = "" + } + return func(c *libpod.Container) bool { + for labelKey, labelValue := range c.Labels() { + if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) { + return true + } + } + return false + }, nil + case "name": + return func(c *libpod.Container) bool { + match, err := regexp.MatchString(filterValue, c.Name()) + if err != nil { + return false + } + return match + }, nil + case "exited": + exitCode, err := strconv.ParseInt(filterValue, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "exited code out of range %q", filterValue) + } + return func(c *libpod.Container) bool { + ec, exited, err := c.ExitCode() + if ec == int32(exitCode) && err == nil && exited { + return true + } + return false + }, nil + case "status": + if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) { + return nil, errors.Errorf("%s is not a valid status", filterValue) + } + return func(c *libpod.Container) bool { + status, err := c.State() + if err != nil { + return false + } + if filterValue == "stopped" { + filterValue = "exited" + } + state := status.String() + if status == define.ContainerStateConfigured { + state = "created" + } else if status == define.ContainerStateStopped { + state = "exited" + } + return state == filterValue + }, nil + case "ancestor": + // This needs to refine to match docker + // - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant. 
+ return func(c *libpod.Container) bool { + containerConfig := c.Config() + if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) { + return true + } + return false + }, nil + case "before": + ctr, err := r.LookupContainer(filterValue) + if err != nil { + return nil, errors.Errorf("unable to find container by name or id of %s", filterValue) + } + containerConfig := ctr.Config() + createTime := containerConfig.CreatedTime + return func(c *libpod.Container) bool { + cc := c.Config() + return createTime.After(cc.CreatedTime) + }, nil + case "since": + ctr, err := r.LookupContainer(filterValue) + if err != nil { + return nil, errors.Errorf("unable to find container by name or id of %s", filterValue) + } + containerConfig := ctr.Config() + createTime := containerConfig.CreatedTime + return func(c *libpod.Container) bool { + cc := c.Config() + return createTime.Before(cc.CreatedTime) + }, nil + case "volume": + //- volume=(<volume-name>|<mount-point-destination>) + return func(c *libpod.Container) bool { + containerConfig := c.Config() + var dest string + arr := strings.Split(filterValue, ":") + source := arr[0] + if len(arr) == 2 { + dest = arr[1] + } + for _, mount := range containerConfig.Spec.Mounts { + if dest != "" && (mount.Source == source && mount.Destination == dest) { + return true + } + if dest == "" && mount.Source == source { + return true + } + } + return false + }, nil + case "health": + return func(c *libpod.Container) bool { + hcStatus, err := c.HealthCheckStatus() + if err != nil { + return false + } + return hcStatus == filterValue + }, nil + case "until": + ts, err := timetype.GetTimestamp(filterValue, time.Now()) + if err != nil { + return nil, err + } + seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + if err != nil { + return nil, err + } + until := time.Unix(seconds, nanoseconds) + return func(c *libpod.Container) bool { + if !until.IsZero() && c.CreatedTime().After((until)) { + return true + } + return false + }, nil + } + return nil, errors.Errorf("%s is an invalid filter", filter) +} + +// GetPsContainerOutput returns a slice of containers specifically for ps output. +func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) { + var ( + filterFuncs []libpod.ContainerFilter + outputContainers []*libpod.Container + ) + + if len(filters) > 0 { + for _, f := range filters { + filterSplit := strings.SplitN(f, "=", 2) + if len(filterSplit) < 2 { + return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + generatedFunc, err := GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter") + } + filterFuncs = append(filterFuncs, generatedFunc) + } + } + if !opts.Latest { + // Get all containers. + containers, err := r.GetContainers(filterFuncs...) + if err != nil { + return nil, err + } + + // We only want the last few containers. + if opts.Last > 0 && opts.Last <= len(containers) { + return nil, errors.Errorf("--last not yet supported") + } else { + outputContainers = containers + } + } else { + // Get just the latest container. + // Ignore filters. 
+ latestCtr, err := r.GetLatestContainer() + if err != nil { + return nil, err + } + + outputContainers = []*libpod.Container{latestCtr} + } + + pss := PBatch(r, outputContainers, maxWorkers, opts) + return pss, nil +} + +// PBatch performs batch operations on a container in parallel. It spawns the +// number of workers relative to the number of parallel operations desired. +func PBatch(r *libpod.Runtime, containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput { + var wg sync.WaitGroup + psResults := []PsContainerOutput{} + + // If the number of containers in question is less than the number of + // proposed parallel operations, we shouldn't spawn so many workers. + if workers > len(containers) { + workers = len(containers) + } + + jobs := make(chan workerInput, len(containers)) + results := make(chan PsContainerOutput, len(containers)) + batchErrors := make(chan error, len(containers)) + + // Create the workers. + for w := 1; w <= workers; w++ { + go worker(&wg, jobs, results, batchErrors) + } + + // Add jobs to the workers. + for i, j := range containers { + j := j + wg.Add(1) + f := func() (PsContainerOutput, error) { + return NewBatchContainer(r, j, opts) + } + jobs <- workerInput{ + parallelFunc: f, + opts: opts, + cid: j.ID(), + job: i, + } + } + close(jobs) + wg.Wait() + close(results) + close(batchErrors) + for err := range batchErrors { + logrus.Errorf("unable to get container info: %q", err) + } + for res := range results { + // We sort out running vs non-running here to save lots of copying + // later. + if !opts.All && !opts.Latest && opts.Last < 1 { + if !res.IsInfra && res.State == define.ContainerStateRunning { + psResults = append(psResults, res) + } + } else { + psResults = append(psResults, res) + } + } + return psResults +} + +// BatchContainerOp is used in ps to reduce performance hits by "batching" +// locks. 
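GetPsContainerOutput and PBatch above (together with the Parallelize helper defined further down in this file) give callers of this package a single entry point for ps data. A hedged usage sketch, written as a hypothetical helper inside the same varlinkapi package (rt is an existing *libpod.Runtime; error handling trimmed):

// listRunning is illustrative only, not part of this commit.
func listRunning(rt *libpod.Runtime) error {
	opts := PsOptions{Size: true}
	rows, err := GetPsContainerOutput(rt, opts, []string{"status=running"}, Parallelize("ps"))
	if err != nil {
		return err
	}
	for _, row := range rows {
		fmt.Printf("%-14s %-30s %-20s %s\n", row.ID, row.Image, row.Status, row.Names)
	}
	return nil
}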
+func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) { + var ( + conConfig *libpod.ContainerConfig + conState define.ContainerStatus + err error + exitCode int32 + exited bool + pid int + size *ContainerSize + startedTime time.Time + exitedTime time.Time + ) + + batchErr := ctr.Batch(func(c *libpod.Container) error { + conConfig = c.Config() + conState, err = c.State() + if err != nil { + return errors.Wrapf(err, "unable to obtain container state") + } + + exitCode, exited, err = c.ExitCode() + if err != nil { + return errors.Wrapf(err, "unable to obtain container exit code") + } + startedTime, err = c.StartedTime() + if err != nil { + logrus.Errorf("error getting started time for %q: %v", c.ID(), err) + } + exitedTime, err = c.FinishedTime() + if err != nil { + logrus.Errorf("error getting exited time for %q: %v", c.ID(), err) + } + + if !opts.Size && !opts.Namespace { + return nil + } + + if opts.Namespace { + pid, err = c.PID() + if err != nil { + return errors.Wrapf(err, "unable to obtain container pid") + } + } + if opts.Size { + size = new(ContainerSize) + + rootFsSize, err := c.RootFsSize() + if err != nil { + logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err) + } + + rwSize, err := c.RWSize() + if err != nil { + logrus.Errorf("error getting rw size for %q: %v", c.ID(), err) + } + + size.RootFsSize = rootFsSize + size.RwSize = rwSize + } + return nil + }) + if batchErr != nil { + return BatchContainerStruct{}, batchErr + } + return BatchContainerStruct{ + ConConfig: conConfig, + ConState: conState, + ExitCode: exitCode, + Exited: exited, + Pid: pid, + StartedTime: startedTime, + ExitedTime: exitedTime, + Size: size, + }, nil +} + +// GetNamespaces returns a populated namespace struct. +func GetNamespaces(pid int) *Namespace { + ctrPID := strconv.Itoa(pid) + cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup")) + ipc, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "ipc")) + mnt, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "mnt")) + net, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "net")) + pidns, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "pid")) + user, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "user")) + uts, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "uts")) + + return &Namespace{ + PID: ctrPID, + Cgroup: cgroup, + IPC: ipc, + MNT: mnt, + NET: net, + PIDNS: pidns, + User: user, + UTS: uts, + } +} + +// GetNamespaceInfo is an exported wrapper for getNamespaceInfo +func GetNamespaceInfo(path string) (string, error) { + return getNamespaceInfo(path) +} + +func getNamespaceInfo(path string) (string, error) { + val, err := os.Readlink(path) + if err != nil { + return "", errors.Wrapf(err, "error getting info from %q", path) + } + return getStrFromSquareBrackets(val), nil +} + +// getStrFromSquareBrackets gets the string inside [] from a string. 
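GetNamespaces and getNamespaceInfo above come down to reading the /proc/<pid>/ns symlinks, whose targets look like "net:[4026531992]", and keeping only the bracketed id. A stand-alone illustration of that (Linux only, inspecting the current process):

package main

import (
	"fmt"
	"os"
	"regexp"
)

func main() {
	val, err := os.Readlink("/proc/self/ns/net")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Same idea as getStrFromSquareBrackets below: strip everything
	// outside the square brackets.
	reg := regexp.MustCompile(`.*\[|\].*`)
	fmt.Printf("%s -> namespace id %s\n", val, reg.ReplaceAllLiteralString(val, ""))
}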
+func getStrFromSquareBrackets(cmd string) string { + reg := regexp.MustCompile(`.*\[|\].*`) + arr := strings.Split(reg.ReplaceAllLiteralString(cmd, ""), ",") + return strings.Join(arr, ",") +} + +func comparePorts(i, j ocicni.PortMapping) bool { + if i.ContainerPort != j.ContainerPort { + return i.ContainerPort < j.ContainerPort + } + + if i.HostIP != j.HostIP { + return i.HostIP < j.HostIP + } + + if i.HostPort != j.HostPort { + return i.HostPort < j.HostPort + } + + return i.Protocol < j.Protocol +} + +// formatGroup returns the group as <IP:startPort:lastPort->startPort:lastPort/Proto> +// e.g 0.0.0.0:1000-1006->1000-1006/tcp. +func formatGroup(key string, start, last int32) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// portsToString converts the ports used to a string of the from "port1, port2" +// and also groups a continuous list of ports into a readable format. +func portsToString(ports []ocicni.PortMapping) string { + type portGroup struct { + first int32 + last int32 + } + var portDisplay []string + if len(ports) == 0 { + return "" + } + //Sort the ports, so grouping continuous ports become easy. + sort.Slice(ports, func(i, j int) bool { + return comparePorts(ports[i], ports[j]) + }) + + // portGroupMap is used for grouping continuous ports. + portGroupMap := make(map[string]*portGroup) + var groupKeyList []string + + for _, v := range ports { + + hostIP := v.HostIP + if hostIP == "" { + hostIP = "0.0.0.0" + } + // If hostPort and containerPort are not same, consider as individual port. + if v.ContainerPort != v.HostPort { + portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol)) + continue + } + + portMapKey := fmt.Sprintf("%s/%s", hostIP, v.Protocol) + + portgroup, ok := portGroupMap[portMapKey] + if !ok { + portGroupMap[portMapKey] = &portGroup{first: v.ContainerPort, last: v.ContainerPort} + // This list is required to traverse portGroupMap. + groupKeyList = append(groupKeyList, portMapKey) + continue + } + + if portgroup.last == (v.ContainerPort - 1) { + portgroup.last = v.ContainerPort + continue + } + } + // For each portMapKey, format group list and appned to output string. + for _, portKey := range groupKeyList { + group := portGroupMap[portKey] + portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last)) + } + return strings.Join(portDisplay, ", ") +} + +// GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the +// construction of the runlabel output and environment variables. 
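As a concrete example of the grouping done by portsToString and formatGroup above: consecutive published ports on the same host IP and protocol collapse into one range, while a remapped port is kept on its own. A hypothetical fragment inside this package (the expected output follows from the logic above):

ports := []ocicni.PortMapping{
	{HostIP: "", HostPort: 8080, ContainerPort: 8080, Protocol: "tcp"},
	{HostIP: "", HostPort: 8081, ContainerPort: 8081, Protocol: "tcp"},
	{HostIP: "", HostPort: 8082, ContainerPort: 8082, Protocol: "tcp"},
	{HostIP: "", HostPort: 9000, ContainerPort: 80, Protocol: "tcp"},
}
fmt.Println(portsToString(ports))
// Should print: 0.0.0.0:9000->80/tcp, 0.0.0.0:8080-8082->8080-8082/tcp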
+func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) { + var ( + newImage *image.Image + err error + imageName string + ) + if pull { + var registryCreds *types.DockerAuthConfig + if inputCreds != "" { + creds, err := util.ParseRegistryCreds(inputCreds) + if err != nil { + return "", "", err + } + registryCreds = creds + } + dockerRegistryOptions.DockerRegistryCreds = registryCreds + newImage, err = runtime.ImageRuntime().New(ctx, runlabelImage, signaturePolicyPath, authfile, output, &dockerRegistryOptions, image.SigningOptions{}, &label, util.PullImageMissing) + } else { + newImage, err = runtime.ImageRuntime().NewFromLocal(runlabelImage) + } + if err != nil { + return "", "", errors.Wrapf(err, "unable to find image") + } + + if len(newImage.Names()) < 1 { + imageName = newImage.ID() + } else { + imageName = newImage.Names()[0] + } + + runLabel, err := newImage.GetLabel(ctx, label) + return runLabel, imageName, err +} + +// GenerateRunlabelCommand generates the command that will eventually be executed by Podman. +func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) { + // If no name is provided, we use the image's basename instead. + if name == "" { + baseName, err := image.GetImageBaseName(imageName) + if err != nil { + return nil, nil, err + } + name = baseName + } + // The user provided extra arguments that need to be tacked onto the label's command. + if len(extraArgs) > 0 { + runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " ")) + } + cmd, err := GenerateCommand(runLabel, imageName, name, globalOpts) + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to generate command") + } + env := GenerateRunEnvironment(name, imageName, opts) + env = append(env, "PODMAN_RUNLABEL_NESTED=1") + + envmap := envSliceToMap(env) + + envmapper := func(k string) string { + switch k { + case "OPT1": + return envmap["OPT1"] + case "OPT2": + return envmap["OPT2"] + case "OPT3": + return envmap["OPT3"] + case "PWD": + // I would prefer to use os.Getenv but it appears PWD is not in the os env list. + d, err := os.Getwd() + if err != nil { + logrus.Error("unable to determine current working directory") + return "" + } + return d + } + return "" + } + newS := os.Expand(strings.Join(cmd, " "), envmapper) + cmd, err = shlex.Split(newS) + if err != nil { + return nil, nil, err + } + return cmd, env, nil +} + +func envSliceToMap(env []string) map[string]string { + m := make(map[string]string) + for _, i := range env { + split := strings.Split(i, "=") + m[split[0]] = strings.Join(split[1:], " ") + } + return m +} + +// GenerateKube generates Kubernetes YAML based on a pod or container. +func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Service, error) { + var ( + pod *libpod.Pod + podYAML *v1.Pod + err error + container *libpod.Container + servicePorts []v1.ServicePort + serviceYAML v1.Service + ) + // Get the container in question.
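GenerateRunlabelCommand above is essentially os.Expand with a custom variable mapper, followed by shell-style splitting. A stand-alone sketch of that pair, using the same github.com/google/shlex package this file imports (the label text and image name are made up):

package main

import (
	"fmt"
	"os"

	"github.com/google/shlex"
)

func main() {
	// A LABEL value as it might appear in an image, with placeholders.
	runLabel := `run -d --name $NAME -v $PWD:/data $IMAGE`

	mapper := func(key string) string {
		switch key {
		case "NAME":
			return "myservice"
		case "IMAGE":
			return "registry.example.com/myimage:latest" // hypothetical image
		case "PWD":
			wd, err := os.Getwd()
			if err != nil {
				return ""
			}
			return wd
		}
		return ""
	}

	expanded := os.Expand(runLabel, mapper)
	argv, err := shlex.Split(expanded)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("%q\n", argv)
}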
+ container, err = r.LookupContainer(name) + if err != nil { + pod, err = r.LookupPod(name) + if err != nil { + return nil, nil, err + } + podYAML, servicePorts, err = pod.GenerateForKube() + } else { + if len(container.Dependencies()) > 0 { + return nil, nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies") + } + podYAML, err = container.GenerateForKube() + } + if err != nil { + return nil, nil, err + } + + if service { + serviceYAML = libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts) + } + return podYAML, &serviceYAML, nil +} + +// Parallelize provides the maximum number of parallel workers (int) as calculated by a basic +// heuristic. This can be overridden by the --max-workers primary switch to podman. +func Parallelize(job string) int { + numCpus := runtime.NumCPU() + switch job { + case "kill": + if numCpus <= 3 { + return numCpus * 3 + } + return numCpus * 4 + case "pause": + if numCpus <= 3 { + return numCpus * 3 + } + return numCpus * 4 + case "ps": + return 8 + case "restart": + return numCpus * 2 + case "rm": + if numCpus <= 3 { + return numCpus * 3 + } else { + return numCpus * 4 + } + case "stop": + if numCpus <= 2 { + return 4 + } else { + return numCpus * 3 + } + case "unpause": + if numCpus <= 3 { + return numCpus * 3 + } + return numCpus * 4 + } + return 3 +} diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go index 66b3e4095..8fba07c18 100644 --- a/pkg/varlinkapi/containers.go +++ b/pkg/varlinkapi/containers.go @@ -14,11 +14,9 @@ import ( "syscall" "time" - "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/logs" - "github.com/containers/libpod/pkg/adapter/shortcuts" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" iopodman "github.com/containers/libpod/pkg/varlink" @@ -39,12 +37,12 @@ func (i *VarlinkAPI) ListContainers(call iopodman.VarlinkCall) error { if err != nil { return call.ReplyErrorOccurred(err.Error()) } - opts := shared.PsOptions{ + opts := PsOptions{ Namespace: true, Size: true, } for _, ctr := range containers { - batchInfo, err := shared.BatchContainerOp(ctr, opts) + batchInfo, err := BatchContainerOp(ctr, opts) if err != nil { return call.ReplyErrorOccurred(err.Error()) } @@ -58,13 +56,13 @@ func (i *VarlinkAPI) Ps(call iopodman.VarlinkCall, opts iopodman.PsOpts) error { var ( containers []iopodman.PsContainer ) - maxWorkers := shared.Parallelize("ps") + maxWorkers := Parallelize("ps") psOpts := makePsOpts(opts) filters := []string{} if opts.Filters != nil { filters = *opts.Filters } - psContainerOutputs, err := shared.GetPsContainerOutput(i.Runtime, psOpts, filters, maxWorkers) + psContainerOutputs, err := GetPsContainerOutput(i.Runtime, psOpts, filters, maxWorkers) if err != nil { return call.ReplyErrorOccurred(err.Error()) } @@ -111,22 +109,22 @@ func (i *VarlinkAPI) GetContainer(call iopodman.VarlinkCall, id string) error { if err != nil { return call.ReplyContainerNotFound(id, err.Error()) } - opts := shared.PsOptions{ + opts := PsOptions{ Namespace: true, Size: true, } - batchInfo, err := shared.BatchContainerOp(ctr, opts) + batchInfo, err := BatchContainerOp(ctr, opts) if err != nil { return call.ReplyErrorOccurred(err.Error()) } return call.ReplyGetContainer(makeListContainer(ctr.ID(), batchInfo)) } -// GetContainersByContext returns a slice of container ids based on all, latest, or a list +// getContainersByContext returns a slice of 
container ids based on all, latest, or a list func (i *VarlinkAPI) GetContainersByContext(call iopodman.VarlinkCall, all, latest bool, input []string) error { var ids []string - ctrs, err := shortcuts.GetContainersByContext(all, latest, input, i.Runtime) + ctrs, err := getContainersByContext(all, latest, input, i.Runtime) if err != nil { if errors.Cause(err) == define.ErrNoSuchCtr { return call.ReplyContainerNotFound("", err.Error()) @@ -160,9 +158,9 @@ func (i *VarlinkAPI) GetContainersByStatus(call iopodman.VarlinkCall, statuses [ if err != nil { return call.ReplyErrorOccurred(err.Error()) } - opts := shared.PsOptions{Size: true, Namespace: true} + opts := PsOptions{Size: true, Namespace: true} for _, ctr := range filteredContainers { - batchInfo, err := shared.BatchContainerOp(ctr, opts) + batchInfo, err := BatchContainerOp(ctr, opts) if err != nil { return call.ReplyErrorOccurred(err.Error()) } @@ -752,7 +750,7 @@ func (i *VarlinkAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string tailLen = 0 } logChannel := make(chan *logs.LogLine, tailLen*len(names)+1) - containers, err := shortcuts.GetContainersByContext(false, latest, names, i.Runtime) + containers, err := getContainersByContext(false, latest, names, i.Runtime) if err != nil { return call.ReplyErrorOccurred(err.Error()) } diff --git a/pkg/varlinkapi/containers_create.go b/pkg/varlinkapi/containers_create.go index c1c1f6674..f0a87491a 100644 --- a/pkg/varlinkapi/containers_create.go +++ b/pkg/varlinkapi/containers_create.go @@ -3,14 +3,13 @@ package varlinkapi import ( - "github.com/containers/libpod/cmd/podman/shared" iopodman "github.com/containers/libpod/pkg/varlink" ) // CreateContainer ... func (i *VarlinkAPI) CreateContainer(call iopodman.VarlinkCall, config iopodman.Create) error { - generic := shared.VarlinkCreateToGeneric(config) - ctr, _, err := shared.CreateContainer(getContext(), &generic, i.Runtime) + generic := VarlinkCreateToGeneric(config) + ctr, _, err := CreateContainer(getContext(), &generic, i.Runtime) if err != nil { return call.ReplyErrorOccurred(err.Error()) } diff --git a/pkg/varlinkapi/create.go b/pkg/varlinkapi/create.go new file mode 100644 index 000000000..63d5072c6 --- /dev/null +++ b/pkg/varlinkapi/create.go @@ -0,0 +1,1154 @@ +package varlinkapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/containers/image/v5/manifest" + "github.com/containers/libpod/cmd/podman/parse" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/image" + ann "github.com/containers/libpod/pkg/annotations" + "github.com/containers/libpod/pkg/autoupdate" + "github.com/containers/libpod/pkg/cgroups" + envLib "github.com/containers/libpod/pkg/env" + "github.com/containers/libpod/pkg/errorhandling" + "github.com/containers/libpod/pkg/inspect" + ns "github.com/containers/libpod/pkg/namespaces" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/seccomp" + cc "github.com/containers/libpod/pkg/spec" + "github.com/containers/libpod/pkg/sysinfo" + systemdGen "github.com/containers/libpod/pkg/systemd/generate" + "github.com/containers/libpod/pkg/util" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var DefaultKernelNamespaces = "cgroup,ipc,net,uts" + +func CreateContainer(ctx 
context.Context, c *GenericCLIResults, runtime *libpod.Runtime) (*libpod.Container, *cc.CreateConfig, error) { + var ( + healthCheck *manifest.Schema2HealthConfig + err error + cidFile *os.File + ) + if c.Bool("trace") { + span, _ := opentracing.StartSpanFromContext(ctx, "createContainer") + defer span.Finish() + } + if c.Bool("rm") && c.String("restart") != "" && c.String("restart") != "no" { + return nil, nil, errors.Errorf("the --rm option conflicts with --restart") + } + + rtc, err := runtime.GetConfig() + if err != nil { + return nil, nil, err + } + rootfs := "" + if c.Bool("rootfs") { + rootfs = c.InputArgs[0] + } + + if c.IsSet("cidfile") { + cidFile, err = util.OpenExclusiveFile(c.String("cidfile")) + if err != nil && os.IsExist(err) { + return nil, nil, errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", c.String("cidfile")) + } + if err != nil { + return nil, nil, errors.Errorf("error opening cidfile %s", c.String("cidfile")) + } + defer errorhandling.CloseQuiet(cidFile) + defer errorhandling.SyncQuiet(cidFile) + } + + imageName := "" + rawImageName := "" + var imageData *inspect.ImageData = nil + + // Set the storage if there is no rootfs specified + if rootfs == "" { + var writer io.Writer + if !c.Bool("quiet") { + writer = os.Stderr + } + + if len(c.InputArgs) != 0 { + rawImageName = c.InputArgs[0] + } else { + return nil, nil, errors.Errorf("error, image name not provided") + } + + pullType, err := util.ValidatePullType(c.String("pull")) + if err != nil { + return nil, nil, err + } + + overrideOS := c.String("override-os") + overrideArch := c.String("override-arch") + dockerRegistryOptions := image.DockerRegistryOptions{ + OSChoice: overrideOS, + ArchitectureChoice: overrideArch, + } + + newImage, err := runtime.ImageRuntime().New(ctx, rawImageName, rtc.Engine.SignaturePolicyPath, c.String("authfile"), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType) + if err != nil { + return nil, nil, err + } + imageData, err = newImage.InspectNoSize(ctx) + if err != nil { + return nil, nil, err + } + + if overrideOS == "" && imageData.Os != goruntime.GOOS { + logrus.Infof("Using %q (OS) image on %q host", imageData.Os, goruntime.GOOS) + } + + if overrideArch == "" && imageData.Architecture != goruntime.GOARCH { + logrus.Infof("Using %q (architecture) on %q host", imageData.Architecture, goruntime.GOARCH) + } + + names := newImage.Names() + if len(names) > 0 { + imageName = names[0] + } else { + imageName = newImage.ID() + } + + // if the user disabled the healthcheck with "none" or the no-healthcheck + // options is provided, we skip adding it + healthCheckCommandInput := c.String("healthcheck-command") + + // the user didn't disable the healthcheck but did pass in a healthcheck command + // now we need to make a healthcheck from the commandline input + if healthCheckCommandInput != "none" && !c.Bool("no-healthcheck") { + if len(healthCheckCommandInput) > 0 { + healthCheck, err = makeHealthCheckFromCli(c) + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to create healthcheck") + } + } else { + // the user did not disable the health check and did not pass in a healthcheck + // command as input. 
so now we add a healthcheck if it exists AND is the correct media type
+ _, mediaType, err := newImage.Manifest(ctx)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to determine mediatype of image %s", newImage.ID())
+ }
+ if mediaType == manifest.DockerV2Schema2MediaType {
+ healthCheck, err = newImage.GetHealthCheck(ctx)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "unable to get healthcheck for %s", c.InputArgs[0])
+ }
+
+ if healthCheck != nil {
+ hcCommand := healthCheck.Test
+ if len(hcCommand) < 1 || hcCommand[0] == "" || hcCommand[0] == "NONE" {
+ // disable health check
+ healthCheck = nil
+ } else {
+ // apply defaults if image doesn't override them
+ if healthCheck.Interval == 0 {
+ healthCheck.Interval = 30 * time.Second
+ }
+ if healthCheck.Timeout == 0 {
+ healthCheck.Timeout = 30 * time.Second
+ }
+ /* Docker default is 0s, so the following would be a no-op
+ if healthCheck.StartPeriod == 0 {
+ healthCheck.StartPeriod = 0 * time.Second
+ }
+ */
+ if healthCheck.Retries == 0 {
+ healthCheck.Retries = 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ createConfig, err := ParseCreateOpts(ctx, c, runtime, imageName, rawImageName, imageData)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // (VR): Ideally we perform the checks _before_ pulling the image but that
+ // would require some bigger code refactoring of `ParseCreateOpts` and the
+ // logic here. But as the creation code will be consolidated in the future
+ // and given auto updates are experimental, we can live with that for now.
+ // In the end, the user may only need to correct the policy or the raw image
+ // name.
+ autoUpdatePolicy, autoUpdatePolicySpecified := createConfig.Labels[autoupdate.Label]
+ if autoUpdatePolicySpecified {
+ if _, err := autoupdate.LookupPolicy(autoUpdatePolicy); err != nil {
+ return nil, nil, err
+ }
+ // Now we need to make sure we have a fully-qualified image reference.
+ if rootfs != "" {
+ return nil, nil, errors.Errorf("auto updates do not work with --rootfs")
+ }
+ // Make sure the input image is a Docker image reference.
+ if err := autoupdate.ValidateImageReference(rawImageName); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Because ParseCreateOpts does not derive the health check from the image, we add the health check
+ // at this point. The rest is done by WithOptions.
+ createConfig.HealthCheck = healthCheck + + // TODO: Should be able to return this from ParseCreateOpts + var pod *libpod.Pod + if createConfig.Pod != "" { + pod, err = runtime.LookupPod(createConfig.Pod) + if err != nil { + return nil, nil, errors.Wrapf(err, "error looking up pod to join") + } + } + + ctr, err := CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod) + if err != nil { + return nil, nil, err + } + if cidFile != nil { + _, err = cidFile.WriteString(ctr.ID()) + if err != nil { + logrus.Error(err) + } + + } + + logrus.Debugf("New container created %q", ctr.ID()) + return ctr, createConfig, nil +} + +func configureEntrypoint(c *GenericCLIResults, data *inspect.ImageData) []string { + entrypoint := []string{} + if c.IsSet("entrypoint") { + // Force entrypoint to "" + if c.String("entrypoint") == "" { + return entrypoint + } + // Check if entrypoint specified is json + if err := json.Unmarshal([]byte(c.String("entrypoint")), &entrypoint); err == nil { + return entrypoint + } + // Return entrypoint as a single command + return []string{c.String("entrypoint")} + } + if data != nil { + return data.Config.Entrypoint + } + return entrypoint +} + +func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, string, error) { + pod, err := runtime.LookupPod(podName) + if err != nil { + return namespaces, "", err + } + podInfraID, err := pod.InfraContainerID() + if err != nil { + return namespaces, "", err + } + hasUserns := false + if podInfraID != "" { + podCtr, err := runtime.GetContainer(podInfraID) + if err != nil { + return namespaces, "", err + } + mappings, err := podCtr.IDMappings() + if err != nil { + return namespaces, "", err + } + hasUserns = len(mappings.UIDMap) > 0 + } + + if (namespaces["pid"] == cc.Pod) || (!c.IsSet("pid") && pod.SharesPID()) { + namespaces["pid"] = fmt.Sprintf("container:%s", podInfraID) + } + if (namespaces["net"] == cc.Pod) || (!c.IsSet("net") && !c.IsSet("network") && pod.SharesNet()) { + namespaces["net"] = fmt.Sprintf("container:%s", podInfraID) + } + if hasUserns && (namespaces["user"] == cc.Pod) || (!c.IsSet("user") && pod.SharesUser()) { + namespaces["user"] = fmt.Sprintf("container:%s", podInfraID) + } + if (namespaces["ipc"] == cc.Pod) || (!c.IsSet("ipc") && pod.SharesIPC()) { + namespaces["ipc"] = fmt.Sprintf("container:%s", podInfraID) + } + if (namespaces["uts"] == cc.Pod) || (!c.IsSet("uts") && pod.SharesUTS()) { + namespaces["uts"] = fmt.Sprintf("container:%s", podInfraID) + } + return namespaces, podInfraID, nil +} + +// Parses CLI options related to container creation into a config which can be +// parsed into an OCI runtime spec +func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.Runtime, imageName string, rawImageName string, data *inspect.ImageData) (*cc.CreateConfig, error) { + var ( + inputCommand, command []string + memoryLimit, memoryReservation, memorySwap, memoryKernel int64 + blkioWeight uint16 + namespaces map[string]string + ) + + idmappings, err := util.ParseIDMapping(ns.UsernsMode(c.String("userns")), c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidname"), c.String("subgidname")) + if err != nil { + return nil, err + } + + imageID := "" + + inputCommand = c.InputArgs[1:] + if data != nil { + imageID = data.ID + } + + rootfs := "" + if c.Bool("rootfs") { + rootfs = c.InputArgs[0] + } + + if c.String("memory") != "" { + memoryLimit, err = units.RAMInBytes(c.String("memory")) + if err != nil { + 
return nil, errors.Wrapf(err, "invalid value for memory") + } + } + if c.String("memory-reservation") != "" { + memoryReservation, err = units.RAMInBytes(c.String("memory-reservation")) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory-reservation") + } + } + if c.String("memory-swap") != "" { + if c.String("memory-swap") == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(c.String("memory-swap")) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory-swap") + } + } + } + if c.String("kernel-memory") != "" { + memoryKernel, err = units.RAMInBytes(c.String("kernel-memory")) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for kernel-memory") + } + } + if c.String("blkio-weight") != "" { + u, err := strconv.ParseUint(c.String("blkio-weight"), 10, 16) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for blkio-weight") + } + blkioWeight = uint16(u) + } + + tty := c.Bool("tty") + + if c.Changed("cpu-period") && c.Changed("cpus") { + return nil, errors.Errorf("--cpu-period and --cpus cannot be set together") + } + if c.Changed("cpu-quota") && c.Changed("cpus") { + return nil, errors.Errorf("--cpu-quota and --cpus cannot be set together") + } + + if c.Bool("no-hosts") && c.Changed("add-host") { + return nil, errors.Errorf("--no-hosts and --add-host cannot be set together") + } + + // EXPOSED PORTS + var portBindings map[nat.Port][]nat.PortBinding + if data != nil { + portBindings, err = cc.ExposedPorts(c.StringSlice("expose"), c.StringSlice("publish"), c.Bool("publish-all"), data.Config.ExposedPorts) + if err != nil { + return nil, err + } + } + + // Kernel Namespaces + // TODO Fix handling of namespace from pod + // Instead of integrating here, should be done in libpod + // However, that also involves setting up security opts + // when the pod's namespace is integrated + namespaces = map[string]string{ + "cgroup": c.String("cgroupns"), + "pid": c.String("pid"), + "net": c.String("network"), + "ipc": c.String("ipc"), + "user": c.String("userns"), + "uts": c.String("uts"), + } + + originalPodName := c.String("pod") + podName := strings.Replace(originalPodName, "new:", "", 1) + // after we strip out :new, make sure there is something left for a pod name + if len(podName) < 1 && c.IsSet("pod") { + return nil, errors.Errorf("new pod name must be at least one character") + } + + // If we are adding a container to a pod, we would like to add an annotation for the infra ID + // so kata containers can share VMs inside the pod + var podInfraID string + if c.IsSet("pod") { + if strings.HasPrefix(originalPodName, "new:") { + // pod does not exist; lets make it + var podOptions []libpod.PodCreateOption + podOptions = append(podOptions, libpod.WithPodName(podName), libpod.WithInfraContainer(), libpod.WithPodCgroups()) + if len(portBindings) > 0 { + ociPortBindings, err := cc.NatToOCIPortBindings(portBindings) + if err != nil { + return nil, err + } + podOptions = append(podOptions, libpod.WithInfraContainerPorts(ociPortBindings)) + } + + podNsOptions, err := GetNamespaceOptions(strings.Split(DefaultKernelNamespaces, ",")) + if err != nil { + return nil, err + } + podOptions = append(podOptions, podNsOptions...) + // make pod + pod, err := runtime.NewPod(ctx, podOptions...) 
+ if err != nil { + return nil, err + } + logrus.Debugf("pod %s created by new container request", pod.ID()) + + // The container now cannot have port bindings; so we reset the map + portBindings = make(map[nat.Port][]nat.PortBinding) + } + namespaces, podInfraID, err = configurePod(c, runtime, namespaces, podName) + if err != nil { + return nil, err + } + } + + pidMode := ns.PidMode(namespaces["pid"]) + if !cc.Valid(string(pidMode), pidMode) { + return nil, errors.Errorf("--pid %q is not valid", c.String("pid")) + } + + usernsMode := ns.UsernsMode(namespaces["user"]) + if !cc.Valid(string(usernsMode), usernsMode) { + return nil, errors.Errorf("--userns %q is not valid", namespaces["user"]) + } + + utsMode := ns.UTSMode(namespaces["uts"]) + if !cc.Valid(string(utsMode), utsMode) { + return nil, errors.Errorf("--uts %q is not valid", namespaces["uts"]) + } + + cgroupMode := ns.CgroupMode(namespaces["cgroup"]) + if !cgroupMode.Valid() { + return nil, errors.Errorf("--cgroup %q is not valid", namespaces["cgroup"]) + } + + ipcMode := ns.IpcMode(namespaces["ipc"]) + if !cc.Valid(string(ipcMode), ipcMode) { + return nil, errors.Errorf("--ipc %q is not valid", ipcMode) + } + + // Make sure if network is set to container namespace, port binding is not also being asked for + netMode := ns.NetworkMode(namespaces["net"]) + if netMode.IsContainer() { + if len(portBindings) > 0 { + return nil, errors.Errorf("cannot set port bindings on an existing container network namespace") + } + } + + // USER + user := c.String("user") + if user == "" { + switch { + case usernsMode.IsKeepID(): + user = fmt.Sprintf("%d:%d", rootless.GetRootlessUID(), rootless.GetRootlessGID()) + case data == nil: + user = "0" + default: + user = data.Config.User + } + } + + // STOP SIGNAL + stopSignal := syscall.SIGTERM + signalString := "" + if data != nil { + signalString = data.Config.StopSignal + } + if c.IsSet("stop-signal") { + signalString = c.String("stop-signal") + } + if signalString != "" { + stopSignal, err = util.ParseSignal(signalString) + if err != nil { + return nil, err + } + } + + // ENVIRONMENT VARIABLES + // + // Precedence order (higher index wins): + // 1) env-host, 2) image data, 3) env-file, 4) env + env := map[string]string{ + "container": "podman", + } + + // First transform the os env into a map. We need it for the labels later in + // any case. + osEnv, err := envLib.ParseSlice(os.Environ()) + if err != nil { + return nil, errors.Wrap(err, "error parsing host environment variables") + } + + // Start with env-host + + if c.Bool("env-host") { + env = envLib.Join(env, osEnv) + } + + // Image data overrides any previous variables + if data != nil { + configEnv, err := envLib.ParseSlice(data.Config.Env) + if err != nil { + return nil, errors.Wrap(err, "error passing image environment variables") + } + env = envLib.Join(env, configEnv) + } + + // env-file overrides any previous variables + if c.IsSet("env-file") { + for _, f := range c.StringSlice("env-file") { + fileEnv, err := envLib.ParseFile(f) + if err != nil { + return nil, err + } + // File env is overridden by env. 
+ env = envLib.Join(env, fileEnv) + } + } + + if c.IsSet("env") { + // env overrides any previous variables + cmdlineEnv := c.StringSlice("env") + if len(cmdlineEnv) > 0 { + parsedEnv, err := envLib.ParseSlice(cmdlineEnv) + if err != nil { + return nil, err + } + env = envLib.Join(env, parsedEnv) + } + } + + // LABEL VARIABLES + labels, err := parse.GetAllLabels(c.StringSlice("label-file"), c.StringArray("label")) + if err != nil { + return nil, errors.Wrapf(err, "unable to process labels") + } + if data != nil { + for key, val := range data.Config.Labels { + if _, ok := labels[key]; !ok { + labels[key] = val + } + } + } + + if systemdUnit, exists := osEnv[systemdGen.EnvVariable]; exists { + labels[systemdGen.EnvVariable] = systemdUnit + } + + // ANNOTATIONS + annotations := make(map[string]string) + + // First, add our default annotations + annotations[ann.TTY] = "false" + if tty { + annotations[ann.TTY] = "true" + } + + // in the event this container is in a pod, and the pod has an infra container + // we will want to configure it as a type "container" instead defaulting to + // the behavior of a "sandbox" container + // In Kata containers: + // - "sandbox" is the annotation that denotes the container should use its own + // VM, which is the default behavior + // - "container" denotes the container should join the VM of the SandboxID + // (the infra container) + if podInfraID != "" { + annotations[ann.SandboxID] = podInfraID + annotations[ann.ContainerType] = ann.ContainerTypeContainer + } + + if data != nil { + // Next, add annotations from the image + for key, value := range data.Annotations { + annotations[key] = value + } + } + // Last, add user annotations + for _, annotation := range c.StringSlice("annotation") { + splitAnnotation := strings.SplitN(annotation, "=", 2) + if len(splitAnnotation) < 2 { + return nil, errors.Errorf("Annotations must be formatted KEY=VALUE") + } + annotations[splitAnnotation[0]] = splitAnnotation[1] + } + + // WORKING DIRECTORY + workDir := "/" + if c.IsSet("workdir") { + workDir = c.String("workdir") + } else if data != nil && data.Config.WorkingDir != "" { + workDir = data.Config.WorkingDir + } + + userCommand := []string{} + entrypoint := configureEntrypoint(c, data) + // Build the command + // If we have an entry point, it goes first + if len(entrypoint) > 0 { + command = entrypoint + } + if len(inputCommand) > 0 { + // User command overrides data CMD + command = append(command, inputCommand...) + userCommand = append(userCommand, inputCommand...) + } else if data != nil && len(data.Config.Cmd) > 0 && !c.IsSet("entrypoint") { + // If not user command, add CMD + command = append(command, data.Config.Cmd...) + userCommand = append(userCommand, data.Config.Cmd...) 
+ } + + if data != nil && len(command) == 0 { + return nil, errors.Errorf("No command specified on command line or as CMD or ENTRYPOINT in this image") + } + + // SHM Size + shmSize, err := units.FromHumanSize(c.String("shm-size")) + if err != nil { + return nil, errors.Wrapf(err, "unable to translate --shm-size") + } + + if c.IsSet("add-host") { + // Verify the additional hosts are in correct format + for _, host := range c.StringSlice("add-host") { + if _, err := parse.ValidateExtraHost(host); err != nil { + return nil, err + } + } + } + + var ( + dnsSearches []string + dnsServers []string + dnsOptions []string + ) + if c.Changed("dns-search") { + dnsSearches = c.StringSlice("dns-search") + // Check for explicit dns-search domain of '' + if len(dnsSearches) == 0 { + return nil, errors.Errorf("'' is not a valid domain") + } + // Validate domains are good + for _, dom := range dnsSearches { + if dom == "." { + if len(dnsSearches) > 1 { + return nil, errors.Errorf("cannot pass additional search domains when also specifying '.'") + } + continue + } + if _, err := parse.ValidateDomain(dom); err != nil { + return nil, err + } + } + } + if c.IsSet("dns") { + dnsServers = append(dnsServers, c.StringSlice("dns")...) + } + if c.IsSet("dns-opt") { + dnsOptions = c.StringSlice("dns-opt") + } + + var ImageVolumes map[string]struct{} + if data != nil && c.String("image-volume") != "ignore" { + ImageVolumes = data.Config.Volumes + } + + var imageVolType = map[string]string{ + "bind": "", + "tmpfs": "", + "ignore": "", + } + if _, ok := imageVolType[c.String("image-volume")]; !ok { + return nil, errors.Errorf("invalid image-volume type %q. Pick one of bind, tmpfs, or ignore", c.String("image-volume")) + } + + systemd := c.String("systemd") == "always" + if !systemd && command != nil { + x, err := strconv.ParseBool(c.String("systemd")) + if err != nil { + return nil, errors.Wrapf(err, "cannot parse bool %s", c.String("systemd")) + } + if x && (command[0] == "/usr/sbin/init" || command[0] == "/sbin/init" || (filepath.Base(command[0]) == "systemd")) { + systemd = true + } + } + if systemd { + if signalString == "" { + stopSignal, err = util.ParseSignal("RTMIN+3") + if err != nil { + return nil, errors.Wrapf(err, "error parsing systemd signal") + } + } + } + // This is done because cobra cannot have two aliased flags. 
So we have to check + // both + memorySwappiness := c.Int64("memory-swappiness") + + logDriver := define.KubernetesLogging + if c.Changed("log-driver") { + logDriver = c.String("log-driver") + } + + pidsLimit := c.Int64("pids-limit") + if c.String("cgroups") == "disabled" && !c.Changed("pids-limit") { + pidsLimit = -1 + } + + pid := &cc.PidConfig{ + PidMode: pidMode, + } + ipc := &cc.IpcConfig{ + IpcMode: ipcMode, + } + + cgroup := &cc.CgroupConfig{ + Cgroups: c.String("cgroups"), + Cgroupns: c.String("cgroupns"), + CgroupParent: c.String("cgroup-parent"), + CgroupMode: cgroupMode, + } + + userns := &cc.UserConfig{ + GroupAdd: c.StringSlice("group-add"), + IDMappings: idmappings, + UsernsMode: usernsMode, + User: user, + } + + uts := &cc.UtsConfig{ + UtsMode: utsMode, + NoHosts: c.Bool("no-hosts"), + HostAdd: c.StringSlice("add-host"), + Hostname: c.String("hostname"), + } + net := &cc.NetworkConfig{ + DNSOpt: dnsOptions, + DNSSearch: dnsSearches, + DNSServers: dnsServers, + HTTPProxy: c.Bool("http-proxy"), + MacAddress: c.String("mac-address"), + Network: c.String("network"), + NetMode: netMode, + IPAddress: c.String("ip"), + Publish: c.StringSlice("publish"), + PublishAll: c.Bool("publish-all"), + PortBindings: portBindings, + } + + sysctl := map[string]string{} + if c.Changed("sysctl") { + sysctl, err = util.ValidateSysctls(c.StringSlice("sysctl")) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for sysctl") + } + } + + secConfig := &cc.SecurityConfig{ + CapAdd: c.StringSlice("cap-add"), + CapDrop: c.StringSlice("cap-drop"), + Privileged: c.Bool("privileged"), + ReadOnlyRootfs: c.Bool("read-only"), + ReadOnlyTmpfs: c.Bool("read-only-tmpfs"), + Sysctl: sysctl, + } + + var securityOpt []string + if c.Changed("security-opt") { + securityOpt = c.StringArray("security-opt") + } + if err := secConfig.SetSecurityOpts(runtime, securityOpt); err != nil { + return nil, err + } + + // SECCOMP + if data != nil { + if value, exists := labels[seccomp.ContainerImageLabel]; exists { + secConfig.SeccompProfileFromImage = value + } + } + if policy, err := seccomp.LookupPolicy(c.String("seccomp-policy")); err != nil { + return nil, err + } else { + secConfig.SeccompPolicy = policy + } + rtc, err := runtime.GetConfig() + if err != nil { + return nil, err + } + volumes := rtc.Containers.Volumes + if c.Changed("volume") { + volumes = append(volumes, c.StringSlice("volume")...) + } + + devices := rtc.Containers.Devices + if c.Changed("device") { + devices = append(devices, c.StringSlice("device")...) + } + + config := &cc.CreateConfig{ + Annotations: annotations, + BuiltinImgVolumes: ImageVolumes, + ConmonPidFile: c.String("conmon-pidfile"), + ImageVolumeType: c.String("image-volume"), + CidFile: c.String("cidfile"), + Command: command, + UserCommand: userCommand, + Detach: c.Bool("detach"), + Devices: devices, + Entrypoint: entrypoint, + Env: env, + // ExposedPorts: ports, + Init: c.Bool("init"), + InitPath: c.String("init-path"), + Image: imageName, + RawImageName: rawImageName, + ImageID: imageID, + Interactive: c.Bool("interactive"), + // IP6Address: c.String("ipv6"), // Not implemented yet - needs CNI support for static v6 + Labels: labels, + // LinkLocalIP: c.StringSlice("link-local-ip"), // Not implemented yet + LogDriver: logDriver, + LogDriverOpt: c.StringSlice("log-opt"), + Name: c.String("name"), + // NetworkAlias: c.StringSlice("network-alias"), // Not implemented - does this make sense in Podman? 
+ Pod: podName, + Quiet: c.Bool("quiet"), + Resources: cc.CreateResourceConfig{ + BlkioWeight: blkioWeight, + BlkioWeightDevice: c.StringSlice("blkio-weight-device"), + CPUShares: c.Uint64("cpu-shares"), + CPUPeriod: c.Uint64("cpu-period"), + CPUsetCPUs: c.String("cpuset-cpus"), + CPUsetMems: c.String("cpuset-mems"), + CPUQuota: c.Int64("cpu-quota"), + CPURtPeriod: c.Uint64("cpu-rt-period"), + CPURtRuntime: c.Int64("cpu-rt-runtime"), + CPUs: c.Float64("cpus"), + DeviceCgroupRules: c.StringSlice("device-cgroup-rule"), + DeviceReadBps: c.StringSlice("device-read-bps"), + DeviceReadIOps: c.StringSlice("device-read-iops"), + DeviceWriteBps: c.StringSlice("device-write-bps"), + DeviceWriteIOps: c.StringSlice("device-write-iops"), + DisableOomKiller: c.Bool("oom-kill-disable"), + ShmSize: shmSize, + Memory: memoryLimit, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + MemorySwappiness: int(memorySwappiness), + KernelMemory: memoryKernel, + OomScoreAdj: c.Int("oom-score-adj"), + PidsLimit: pidsLimit, + Ulimit: c.StringSlice("ulimit"), + }, + RestartPolicy: c.String("restart"), + Rm: c.Bool("rm"), + Security: *secConfig, + StopSignal: stopSignal, + StopTimeout: c.Uint("stop-timeout"), + Systemd: systemd, + Tmpfs: c.StringArray("tmpfs"), + Tty: tty, + MountsFlag: c.StringArray("mount"), + Volumes: volumes, + WorkDir: workDir, + Rootfs: rootfs, + VolumesFrom: c.StringSlice("volumes-from"), + Syslog: c.Bool("syslog"), + + Pid: *pid, + Ipc: *ipc, + Cgroup: *cgroup, + User: *userns, + Uts: *uts, + Network: *net, + } + + warnings, err := verifyContainerResources(config, false) + if err != nil { + return nil, err + } + for _, warning := range warnings { + fmt.Fprintln(os.Stderr, warning) + } + return config, nil +} + +func CreateContainerFromCreateConfig(r *libpod.Runtime, createConfig *cc.CreateConfig, ctx context.Context, pod *libpod.Pod) (*libpod.Container, error) { + runtimeSpec, options, err := createConfig.MakeContainerConfig(r, pod) + if err != nil { + return nil, err + } + + // Set the CreateCommand explicitly. Some (future) consumers of libpod + // might not want to set it. + options = append(options, libpod.WithCreateCommand()) + + ctr, err := r.NewContainer(ctx, runtimeSpec, options...) + if err != nil { + return nil, err + } + return ctr, nil +} + +func makeHealthCheckFromCli(c *GenericCLIResults) (*manifest.Schema2HealthConfig, error) { + inCommand := c.String("healthcheck-command") + inInterval := c.String("healthcheck-interval") + inRetries := c.Uint("healthcheck-retries") + inTimeout := c.String("healthcheck-timeout") + inStartPeriod := c.String("healthcheck-start-period") + + // Every healthcheck requires a command + if len(inCommand) == 0 { + return nil, errors.New("Must define a healthcheck command for all healthchecks") + } + + // first try to parse option value as JSON array of strings... 
+ cmd := []string{} + err := json.Unmarshal([]byte(inCommand), &cmd) + if err != nil { + // ...otherwise pass it to "/bin/sh -c" inside the container + cmd = []string{"CMD-SHELL", inCommand} + } + hc := manifest.Schema2HealthConfig{ + Test: cmd, + } + + if inInterval == "disable" { + inInterval = "0" + } + intervalDuration, err := time.ParseDuration(inInterval) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-interval %s ", inInterval) + } + + hc.Interval = intervalDuration + + if inRetries < 1 { + return nil, errors.New("healthcheck-retries must be greater than 0.") + } + hc.Retries = int(inRetries) + timeoutDuration, err := time.ParseDuration(inTimeout) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-timeout %s", inTimeout) + } + if timeoutDuration < time.Duration(1) { + return nil, errors.New("healthcheck-timeout must be at least 1 second") + } + hc.Timeout = timeoutDuration + + startPeriodDuration, err := time.ParseDuration(inStartPeriod) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-start-period %s", inStartPeriod) + } + if startPeriodDuration < time.Duration(0) { + return nil, errors.New("healthcheck-start-period must be 0 seconds or greater") + } + hc.StartPeriod = startPeriodDuration + + return &hc, nil +} + +// GetNamespaceOptions transforms a slice of kernel namespaces +// into a slice of pod create options. Currently, not all +// kernel namespaces are supported, and they will be returned in an error +func GetNamespaceOptions(ns []string) ([]libpod.PodCreateOption, error) { + var options []libpod.PodCreateOption + var erroredOptions []libpod.PodCreateOption + for _, toShare := range ns { + switch toShare { + case "cgroup": + options = append(options, libpod.WithPodCgroups()) + case "net": + options = append(options, libpod.WithPodNet()) + case "mnt": + return erroredOptions, errors.Errorf("Mount sharing functionality not supported on pod level") + case "pid": + options = append(options, libpod.WithPodPID()) + case "user": + return erroredOptions, errors.Errorf("User sharing functionality not supported on pod level") + case "ipc": + options = append(options, libpod.WithPodIPC()) + case "uts": + options = append(options, libpod.WithPodUTS()) + case "": + case "none": + return erroredOptions, nil + default: + return erroredOptions, errors.Errorf("Invalid kernel namespace to share: %s. Options are: net, pid, ipc, uts or none", toShare) + } + } + return options, nil +} + +func addWarning(warnings []string, msg string) []string { + logrus.Warn(msg) + return append(warnings, msg) +} + +func verifyContainerResources(config *cc.CreateConfig, update bool) ([]string, error) { + warnings := []string{} + + cgroup2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil || cgroup2 { + return warnings, err + } + + sysInfo := sysinfo.New(true) + + // memory subsystem checks and adjustments + if config.Resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = addWarning(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + config.Resources.Memory = 0 + config.Resources.MemorySwap = -1 + } + if config.Resources.Memory > 0 && config.Resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = addWarning(warnings, "Your kernel does not support swap limit capabilities,or the cgroup is not mounted. 
Memory limited without swap.") + config.Resources.MemorySwap = -1 + } + if config.Resources.Memory > 0 && config.Resources.MemorySwap > 0 && config.Resources.MemorySwap < config.Resources.Memory { + return warnings, fmt.Errorf("minimum memoryswap limit should be larger than memory limit, see usage") + } + if config.Resources.Memory == 0 && config.Resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("you should always set the memory limit when using memoryswap limit, see usage") + } + if config.Resources.MemorySwappiness != -1 { + if !sysInfo.MemorySwappiness { + msg := "Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded." + warnings = addWarning(warnings, msg) + config.Resources.MemorySwappiness = -1 + } else { + swappiness := config.Resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + } + if config.Resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = addWarning(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + config.Resources.MemoryReservation = 0 + } + if config.Resources.Memory > 0 && config.Resources.MemoryReservation > 0 && config.Resources.Memory < config.Resources.MemoryReservation { + return warnings, fmt.Errorf("minimum memory limit cannot be less than memory reservation limit, see usage") + } + if config.Resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = addWarning(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + config.Resources.KernelMemory = 0 + } + if config.Resources.DisableOomKiller && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + warnings = addWarning(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + config.Resources.DisableOomKiller = false + } + + if config.Resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = addWarning(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + config.Resources.PidsLimit = 0 + } + + if config.Resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = addWarning(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + config.Resources.CPUShares = 0 + } + if config.Resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = addWarning(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + config.Resources.CPUPeriod = 0 + } + if config.Resources.CPUPeriod != 0 && (config.Resources.CPUPeriod < 1000 || config.Resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period cannot be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if config.Resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = addWarning(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + config.Resources.CPUQuota = 0 + } + if config.Resources.CPUQuota > 0 && config.Resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota cannot be less than 1ms (i.e. 
1000)") + } + // cpuset subsystem checks and adjustments + if (config.Resources.CPUsetCPUs != "" || config.Resources.CPUsetMems != "") && !sysInfo.Cpuset { + warnings = addWarning(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. CPUset discarded.") + config.Resources.CPUsetCPUs = "" + config.Resources.CPUsetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(config.Resources.CPUsetCPUs) + if err != nil { + return warnings, fmt.Errorf("invalid value %s for cpuset cpus", config.Resources.CPUsetCPUs) + } + if !cpusAvailable { + return warnings, fmt.Errorf("requested CPUs are not available - requested %s, available: %s", config.Resources.CPUsetCPUs, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(config.Resources.CPUsetMems) + if err != nil { + return warnings, fmt.Errorf("invalid value %s for cpuset mems", config.Resources.CPUsetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("requested memory nodes are not available - requested %s, available: %s", config.Resources.CPUsetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if config.Resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = addWarning(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + config.Resources.BlkioWeight = 0 + } + if config.Resources.BlkioWeight > 0 && (config.Resources.BlkioWeight < 10 || config.Resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("range of blkio weight is from 10 to 1000") + } + if len(config.Resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = addWarning(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + config.Resources.BlkioWeightDevice = []string{} + } + if len(config.Resources.DeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = addWarning(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + config.Resources.DeviceReadBps = []string{} + } + if len(config.Resources.DeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = addWarning(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + config.Resources.DeviceWriteBps = []string{} + } + if len(config.Resources.DeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = addWarning(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + config.Resources.DeviceReadIOps = []string{} + } + if len(config.Resources.DeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = addWarning(warnings, "Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. 
Block I/O IOPS write limit discarded.") + config.Resources.DeviceWriteIOps = []string{} + } + + return warnings, nil +} diff --git a/pkg/varlinkapi/funcs.go b/pkg/varlinkapi/funcs.go new file mode 100644 index 000000000..ed90ba050 --- /dev/null +++ b/pkg/varlinkapi/funcs.go @@ -0,0 +1,121 @@ +package varlinkapi + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/types" + "github.com/containers/libpod/libpod/image" + "github.com/google/shlex" + "github.com/pkg/errors" +) + +func GetSystemContext(authfile string) (*types.SystemContext, error) { + if authfile != "" { + if _, err := os.Stat(authfile); err != nil { + return nil, errors.Wrapf(err, "error checking authfile path %s", authfile) + } + } + return image.GetSystemContext("", authfile, false), nil +} + +func substituteCommand(cmd string) (string, error) { + var ( + newCommand string + ) + + // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being + // used. If "/usr/bin/docker" is provided, we also sub in podman. + // Otherwise, leave the command unchanged. + if cmd == "podman" || filepath.Base(cmd) == "docker" { + newCommand = "/proc/self/exe" + } else { + newCommand = cmd + } + + // If cmd is an absolute or relative path, check if the file exists. + // Throw an error if it doesn't exist. + if strings.Contains(newCommand, "/") || strings.HasPrefix(newCommand, ".") { + res, err := filepath.Abs(newCommand) + if err != nil { + return "", err + } + if _, err := os.Stat(res); !os.IsNotExist(err) { + return res, nil + } else if err != nil { + return "", err + } + } + + return newCommand, nil +} + +// GenerateCommand takes a label (string) and converts it to an executable command +func GenerateCommand(command, imageName, name, globalOpts string) ([]string, error) { + var ( + newCommand []string + ) + if name == "" { + name = imageName + } + + cmd, err := shlex.Split(command) + if err != nil { + return nil, err + } + + prog, err := substituteCommand(cmd[0]) + if err != nil { + return nil, err + } + newCommand = append(newCommand, prog) + + for _, arg := range cmd[1:] { + var newArg string + switch arg { + case "IMAGE": + newArg = imageName + case "$IMAGE": + newArg = imageName + case "IMAGE=IMAGE": + newArg = fmt.Sprintf("IMAGE=%s", imageName) + case "IMAGE=$IMAGE": + newArg = fmt.Sprintf("IMAGE=%s", imageName) + case "NAME": + newArg = name + case "NAME=NAME": + newArg = fmt.Sprintf("NAME=%s", name) + case "NAME=$NAME": + newArg = fmt.Sprintf("NAME=%s", name) + case "$NAME": + newArg = name + case "$GLOBAL_OPTS": + newArg = globalOpts + default: + newArg = arg + } + newCommand = append(newCommand, newArg) + } + return newCommand, nil +} + +// GenerateRunEnvironment merges the current environment variables with optional +// environment variables provided by the user +func GenerateRunEnvironment(name, imageName string, opts map[string]string) []string { + newEnv := os.Environ() + newEnv = append(newEnv, fmt.Sprintf("NAME=%s", name)) + newEnv = append(newEnv, fmt.Sprintf("IMAGE=%s", imageName)) + + if opts["opt1"] != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT1=%s", opts["opt1"])) + } + if opts["opt2"] != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT2=%s", opts["opt2"])) + } + if opts["opt3"] != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT3=%s", opts["opt3"])) + } + return newEnv +} diff --git a/pkg/varlinkapi/generate.go b/pkg/varlinkapi/generate.go index 81a0df68e..4df185db6 100644 --- a/pkg/varlinkapi/generate.go +++ b/pkg/varlinkapi/generate.go @@ -5,13 +5,12 @@ package 
varlinkapi import ( "encoding/json" - "github.com/containers/libpod/cmd/podman/shared" iopodman "github.com/containers/libpod/pkg/varlink" ) // GenerateKube ... func (i *VarlinkAPI) GenerateKube(call iopodman.VarlinkCall, name string, service bool) error { - pod, serv, err := shared.GenerateKube(name, service, i.Runtime) + pod, serv, err := GenerateKube(name, service, i.Runtime) if err != nil { return call.ReplyErrorOccurred(err.Error()) } diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go index 49bd0b0cb..8d43b8414 100644 --- a/pkg/varlinkapi/images.go +++ b/pkg/varlinkapi/images.go @@ -20,7 +20,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" - "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/image" @@ -779,7 +778,7 @@ func (i *VarlinkAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman stdOut := os.Stdout stdIn := os.Stdin - runLabel, imageName, err := shared.GetRunlabel(input.Label, input.Image, ctx, i.Runtime, input.Pull, "", dockerRegistryOptions, input.Authfile, "", nil) + runLabel, imageName, err := GetRunlabel(input.Label, input.Image, ctx, i.Runtime, input.Pull, "", dockerRegistryOptions, input.Authfile, "", nil) if err != nil { return call.ReplyErrorOccurred(err.Error()) } @@ -787,7 +786,7 @@ func (i *VarlinkAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman return call.ReplyErrorOccurred(fmt.Sprintf("%s does not contain the label %s", input.Image, input.Label)) } - cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, input.Name, input.Opts, input.ExtraArgs, "") + cmd, env, err := GenerateRunlabelCommand(runLabel, imageName, input.Name, input.Opts, input.ExtraArgs, "") if err != nil { return call.ReplyErrorOccurred(err.Error()) } diff --git a/pkg/varlinkapi/intermediate.go b/pkg/varlinkapi/intermediate.go new file mode 100644 index 000000000..f04665a86 --- /dev/null +++ b/pkg/varlinkapi/intermediate.go @@ -0,0 +1,289 @@ +package varlinkapi + +import ( + "github.com/sirupsen/logrus" +) + +/* +attention + +in this file you will see a lot of struct duplication. this was done because people wanted a strongly typed +varlink mechanism. this resulted in us creating this intermediate layer that allows us to take the input +from the cli and make an intermediate layer which can be transferred as strongly typed structures over a varlink +interface. + +we intentionally avoided heavy use of reflection here because we were concerned about performance impacts to the +non-varlink intermediate layer generation. 
+*/ + +// GenericCLIResult describes the overall interface for dealing with +// the create command cli in both local and remote uses +type GenericCLIResult interface { + IsSet() bool + Name() string + Value() interface{} +} + +// CRStringSlice describes a string slice cli struct +type CRStringSlice struct { + Val []string + createResult +} + +// CRString describes a string cli struct +type CRString struct { + Val string + createResult +} + +// CRUint64 describes a uint64 cli struct +type CRUint64 struct { + Val uint64 + createResult +} + +// CRFloat64 describes a float64 cli struct +type CRFloat64 struct { + Val float64 + createResult +} + +//CRBool describes a bool cli struct +type CRBool struct { + Val bool + createResult +} + +// CRInt64 describes an int64 cli struct +type CRInt64 struct { + Val int64 + createResult +} + +// CRUint describes a uint cli struct +type CRUint struct { + Val uint + createResult +} + +// CRInt describes an int cli struct +type CRInt struct { + Val int + createResult +} + +// CRStringArray describes a stringarray cli struct +type CRStringArray struct { + Val []string + createResult +} + +type createResult struct { + Flag string + Changed bool +} + +// GenericCLIResults in the intermediate object between the cobra cli +// and createconfig +type GenericCLIResults struct { + results map[string]GenericCLIResult + InputArgs []string +} + +// IsSet returns a bool if the flag was changed +func (f GenericCLIResults) IsSet(flag string) bool { + r := f.findResult(flag) + if r == nil { + return false + } + return r.IsSet() +} + +// Value returns the value of the cli flag +func (f GenericCLIResults) Value(flag string) interface{} { + r := f.findResult(flag) + if r == nil { + return "" + } + return r.Value() +} + +func (f GenericCLIResults) findResult(flag string) GenericCLIResult { + val, ok := f.results[flag] + if ok { + return val + } + logrus.Debugf("unable to find flag %s", flag) + return nil +} + +// Bool is a wrapper to get a bool value from GenericCLIResults +func (f GenericCLIResults) Bool(flag string) bool { + r := f.findResult(flag) + if r == nil { + return false + } + return r.Value().(bool) +} + +// String is a wrapper to get a string value from GenericCLIResults +func (f GenericCLIResults) String(flag string) string { + r := f.findResult(flag) + if r == nil { + return "" + } + return r.Value().(string) +} + +// Uint is a wrapper to get an uint value from GenericCLIResults +func (f GenericCLIResults) Uint(flag string) uint { + r := f.findResult(flag) + if r == nil { + return 0 + } + return r.Value().(uint) +} + +// StringSlice is a wrapper to get a stringslice value from GenericCLIResults +func (f GenericCLIResults) StringSlice(flag string) []string { + r := f.findResult(flag) + if r == nil { + return []string{} + } + return r.Value().([]string) +} + +// StringArray is a wrapper to get a stringslice value from GenericCLIResults +func (f GenericCLIResults) StringArray(flag string) []string { + r := f.findResult(flag) + if r == nil { + return []string{} + } + return r.Value().([]string) +} + +// Uint64 is a wrapper to get an uint64 value from GenericCLIResults +func (f GenericCLIResults) Uint64(flag string) uint64 { + r := f.findResult(flag) + if r == nil { + return 0 + } + return r.Value().(uint64) +} + +// Int64 is a wrapper to get an int64 value from GenericCLIResults +func (f GenericCLIResults) Int64(flag string) int64 { + r := f.findResult(flag) + if r == nil { + return 0 + } + return r.Value().(int64) +} + +// Int is a wrapper to get an int value from 
GenericCLIResults
+func (f GenericCLIResults) Int(flag string) int {
+ r := f.findResult(flag)
+ if r == nil {
+ return 0
+ }
+ return r.Value().(int)
+}
+
+// Float64 is a wrapper to get a float64 value from GenericCLIResults
+func (f GenericCLIResults) Float64(flag string) float64 {
+ r := f.findResult(flag)
+ if r == nil {
+ return 0
+ }
+ return r.Value().(float64)
+}
+
+// Changed reports whether the flag was explicitly set
+func (f GenericCLIResults) Changed(flag string) bool {
+ r := f.findResult(flag)
+ if r == nil {
+ return false
+ }
+ return r.IsSet()
+}
+
+// IsSet ...
+func (c CRStringSlice) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRStringSlice) Name() string { return c.Flag }
+
+// Value ...
+func (c CRStringSlice) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRString) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRString) Name() string { return c.Flag }
+
+// Value ...
+func (c CRString) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRUint64) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRUint64) Name() string { return c.Flag }
+
+// Value ...
+func (c CRUint64) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRFloat64) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRFloat64) Name() string { return c.Flag }
+
+// Value ...
+func (c CRFloat64) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRBool) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRBool) Name() string { return c.Flag }
+
+// Value ...
+func (c CRBool) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRInt64) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRInt64) Name() string { return c.Flag }
+
+// Value ...
+func (c CRInt64) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRUint) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRUint) Name() string { return c.Flag }
+
+// Value ...
+func (c CRUint) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRInt) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRInt) Name() string { return c.Flag }
+
+// Value ...
+func (c CRInt) Value() interface{} { return c.Val }
+
+// IsSet ...
+func (c CRStringArray) IsSet() bool { return c.Changed }
+
+// Name ...
+func (c CRStringArray) Name() string { return c.Flag }
+
+// Value ...
+func (c CRStringArray) Value() interface{} { return c.Val }
diff --git a/pkg/varlinkapi/intermediate_varlink.go b/pkg/varlinkapi/intermediate_varlink.go
new file mode 100644
index 000000000..21c57d4f4
--- /dev/null
+++ b/pkg/varlinkapi/intermediate_varlink.go
@@ -0,0 +1,457 @@
+// +build varlink remoteclient
+
+package varlinkapi
+
+import (
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/libpod/pkg/rootless"
+ iopodman "github.com/containers/libpod/pkg/varlink"
+ "github.com/pkg/errors"
+)
+
+//FIXME these are duplicated here to resolve a circular
+//import with cmd/podman/common.
+var ( + // DefaultHealthCheckInterval default value + DefaultHealthCheckInterval = "30s" + // DefaultHealthCheckRetries default value + DefaultHealthCheckRetries uint = 3 + // DefaultHealthCheckStartPeriod default value + DefaultHealthCheckStartPeriod = "0s" + // DefaultHealthCheckTimeout default value + DefaultHealthCheckTimeout = "30s" + // DefaultImageVolume default value + DefaultImageVolume = "bind" +) + +// StringSliceToPtr converts a genericcliresult value into a *[]string +func StringSliceToPtr(g GenericCLIResult) *[]string { + if !g.IsSet() { + return nil + } + newT := g.Value().([]string) + return &newT +} + +// StringToPtr converts a genericcliresult value into a *string +func StringToPtr(g GenericCLIResult) *string { + if !g.IsSet() { + return nil + } + newT := g.Value().(string) + return &newT +} + +// BoolToPtr converts a genericcliresult value into a *bool +func BoolToPtr(g GenericCLIResult) *bool { + if !g.IsSet() { + return nil + } + newT := g.Value().(bool) + return &newT +} + +// AnyIntToInt64Ptr converts a genericcliresult value into an *int64 +func AnyIntToInt64Ptr(g GenericCLIResult) *int64 { + if !g.IsSet() { + return nil + } + var newT int64 + switch g.Value().(type) { + case int: + newT = int64(g.Value().(int)) + case int64: + newT = g.Value().(int64) + case uint64: + newT = int64(g.Value().(uint64)) + case uint: + newT = int64(g.Value().(uint)) + default: + panic(errors.Errorf("invalid int type")) + } + return &newT +} + +// Float64ToPtr converts a genericcliresult into a *float64 +func Float64ToPtr(g GenericCLIResult) *float64 { + if !g.IsSet() { + return nil + } + newT := g.Value().(float64) + return &newT +} + +// MakeVarlink creates a varlink transportable struct from GenericCLIResults +func (g GenericCLIResults) MakeVarlink() iopodman.Create { + v := iopodman.Create{ + Args: g.InputArgs, + AddHost: StringSliceToPtr(g.Find("add-host")), + Annotation: StringSliceToPtr(g.Find("annotation")), + Attach: StringSliceToPtr(g.Find("attach")), + BlkioWeight: StringToPtr(g.Find("blkio-weight")), + BlkioWeightDevice: StringSliceToPtr(g.Find("blkio-weight-device")), + CapAdd: StringSliceToPtr(g.Find("cap-add")), + CapDrop: StringSliceToPtr(g.Find("cap-drop")), + CgroupParent: StringToPtr(g.Find("cgroup-parent")), + CidFile: StringToPtr(g.Find("cidfile")), + ConmonPidfile: StringToPtr(g.Find("conmon-pidfile")), + CpuPeriod: AnyIntToInt64Ptr(g.Find("cpu-period")), + CpuQuota: AnyIntToInt64Ptr(g.Find("cpu-quota")), + CpuRtPeriod: AnyIntToInt64Ptr(g.Find("cpu-rt-period")), + CpuRtRuntime: AnyIntToInt64Ptr(g.Find("cpu-rt-runtime")), + CpuShares: AnyIntToInt64Ptr(g.Find("cpu-shares")), + Cpus: Float64ToPtr(g.Find("cpus")), + CpuSetCpus: StringToPtr(g.Find("cpuset-cpus")), + CpuSetMems: StringToPtr(g.Find("cpuset-mems")), + Detach: BoolToPtr(g.Find("detach")), + DetachKeys: StringToPtr(g.Find("detach-keys")), + Device: StringSliceToPtr(g.Find("device")), + DeviceReadBps: StringSliceToPtr(g.Find("device-read-bps")), + DeviceReadIops: StringSliceToPtr(g.Find("device-read-iops")), + DeviceWriteBps: StringSliceToPtr(g.Find("device-write-bps")), + DeviceWriteIops: StringSliceToPtr(g.Find("device-write-iops")), + Dns: StringSliceToPtr(g.Find("dns")), + DnsOpt: StringSliceToPtr(g.Find("dns-opt")), + DnsSearch: StringSliceToPtr(g.Find("dns-search")), + Entrypoint: StringToPtr(g.Find("entrypoint")), + Env: StringSliceToPtr(g.Find("env")), + EnvFile: StringSliceToPtr(g.Find("env-file")), + Expose: StringSliceToPtr(g.Find("expose")), + Gidmap: StringSliceToPtr(g.Find("gidmap")), + 
Groupadd: StringSliceToPtr(g.Find("group-add")),
+		HealthcheckCommand: StringToPtr(g.Find("healthcheck-command")),
+		HealthcheckInterval: StringToPtr(g.Find("healthcheck-interval")),
+		HealthcheckRetries: AnyIntToInt64Ptr(g.Find("healthcheck-retries")),
+		HealthcheckStartPeriod: StringToPtr(g.Find("healthcheck-start-period")),
+		HealthcheckTimeout: StringToPtr(g.Find("healthcheck-timeout")),
+		Hostname: StringToPtr(g.Find("hostname")),
+		ImageVolume: StringToPtr(g.Find("image-volume")),
+		Init: BoolToPtr(g.Find("init")),
+		InitPath: StringToPtr(g.Find("init-path")),
+		Interactive: BoolToPtr(g.Find("interactive")),
+		Ip: StringToPtr(g.Find("ip")),
+		Ipc: StringToPtr(g.Find("ipc")),
+		KernelMemory: StringToPtr(g.Find("kernel-memory")),
+		Label: StringSliceToPtr(g.Find("label")),
+		LabelFile: StringSliceToPtr(g.Find("label-file")),
+		LogDriver: StringToPtr(g.Find("log-driver")),
+		LogOpt: StringSliceToPtr(g.Find("log-opt")),
+		MacAddress: StringToPtr(g.Find("mac-address")),
+		Memory: StringToPtr(g.Find("memory")),
+		MemoryReservation: StringToPtr(g.Find("memory-reservation")),
+		MemorySwap: StringToPtr(g.Find("memory-swap")),
+		MemorySwappiness: AnyIntToInt64Ptr(g.Find("memory-swappiness")),
+		Name: StringToPtr(g.Find("name")),
+		Network: StringToPtr(g.Find("network")),
+		OomKillDisable: BoolToPtr(g.Find("oom-kill-disable")),
+		OomScoreAdj: AnyIntToInt64Ptr(g.Find("oom-score-adj")),
+		OverrideOS: StringToPtr(g.Find("override-os")),
+		OverrideArch: StringToPtr(g.Find("override-arch")),
+		Pid: StringToPtr(g.Find("pid")),
+		PidsLimit: AnyIntToInt64Ptr(g.Find("pids-limit")),
+		Pod: StringToPtr(g.Find("pod")),
+		Privileged: BoolToPtr(g.Find("privileged")),
+		Publish: StringSliceToPtr(g.Find("publish")),
+		PublishAll: BoolToPtr(g.Find("publish-all")),
+		Pull: StringToPtr(g.Find("pull")),
+		Quiet: BoolToPtr(g.Find("quiet")),
+		Readonly: BoolToPtr(g.Find("read-only")),
+		Readonlytmpfs: BoolToPtr(g.Find("read-only-tmpfs")),
+		Restart: StringToPtr(g.Find("restart")),
+		Rm: BoolToPtr(g.Find("rm")),
+		Rootfs: BoolToPtr(g.Find("rootfs")),
+		SecurityOpt: StringSliceToPtr(g.Find("security-opt")),
+		ShmSize: StringToPtr(g.Find("shm-size")),
+		StopSignal: StringToPtr(g.Find("stop-signal")),
+		StopTimeout: AnyIntToInt64Ptr(g.Find("stop-timeout")),
+		StorageOpt: StringSliceToPtr(g.Find("storage-opt")),
+		Subuidname: StringToPtr(g.Find("subuidname")),
+		Subgidname: StringToPtr(g.Find("subgidname")),
+		Sysctl: StringSliceToPtr(g.Find("sysctl")),
+		Systemd: StringToPtr(g.Find("systemd")),
+		Tmpfs: StringSliceToPtr(g.Find("tmpfs")),
+		Tty: BoolToPtr(g.Find("tty")),
+		Uidmap: StringSliceToPtr(g.Find("uidmap")),
+		Ulimit: StringSliceToPtr(g.Find("ulimit")),
+		User: StringToPtr(g.Find("user")),
+		Userns: StringToPtr(g.Find("userns")),
+		Uts: StringToPtr(g.Find("uts")),
+		Mount: StringSliceToPtr(g.Find("mount")),
+		Volume: StringSliceToPtr(g.Find("volume")),
+		VolumesFrom: StringSliceToPtr(g.Find("volumes-from")),
+		WorkDir: StringToPtr(g.Find("workdir")),
+	}
+
+	return v
+}
+
+func stringSliceFromVarlink(v *[]string, flagName string, defaultValue *[]string) CRStringSlice {
+	cr := CRStringSlice{}
+	if v == nil {
+		cr.Val = []string{}
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func stringFromVarlink(v *string, flagName string, defaultValue *string) CRString {
+	cr := CRString{}
+	if v == nil {
+		cr.Val = ""
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func boolFromVarlink(v *bool, flagName string, defaultValue bool) CRBool {
+	cr := CRBool{}
+	if v == nil {
+		// In case a cli bool default value is true
+		cr.Val = defaultValue
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func uint64FromVarlink(v *int64, flagName string, defaultValue *uint64) CRUint64 {
+	cr := CRUint64{}
+	if v == nil {
+		cr.Val = 0
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = uint64(*v)
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func int64FromVarlink(v *int64, flagName string, defaultValue *int64) CRInt64 {
+	cr := CRInt64{}
+	if v == nil {
+		cr.Val = 0
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func float64FromVarlink(v *float64, flagName string, defaultValue *float64) CRFloat64 {
+	cr := CRFloat64{}
+	if v == nil {
+		cr.Val = 0
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func uintFromVarlink(v *int64, flagName string, defaultValue *uint) CRUint {
+	cr := CRUint{}
+	if v == nil {
+		cr.Val = 0
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = uint(*v)
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func stringArrayFromVarlink(v *[]string, flagName string, defaultValue *[]string) CRStringArray {
+	cr := CRStringArray{}
+	if v == nil {
+		cr.Val = []string{}
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Changed = false
+	} else {
+		cr.Val = *v
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+func intFromVarlink(v *int64, flagName string, defaultValue *int) CRInt {
+	cr := CRInt{}
+	if v == nil {
+		if defaultValue != nil {
+			cr.Val = *defaultValue
+		}
+		cr.Val = 0
+		cr.Changed = false
+	} else {
+		cr.Val = int(*v)
+		cr.Changed = true
+	}
+	cr.Flag = flagName
+	return cr
+}
+
+// VarlinkCreateToGeneric creates a GenericCLIResults from the varlink create
+// structure.
+func VarlinkCreateToGeneric(opts iopodman.Create) GenericCLIResults {
+	// FIXME this will need to be fixed!!!!! With containers conf
+	//defaultContainerConfig := cliconfig.GetDefaultConfig()
+	// TODO | WARN
+	// We do not get a default network over varlink. Unlike the other default values for some cli
+	// elements, it seems it gets set to the default anyway.
+
+	var memSwapDefault int64 = -1
+	netModeDefault := "bridge"
+	systemdDefault := "true"
+	if rootless.IsRootless() {
+		netModeDefault = "slirp4netns"
+	}
+
+	shmSize := config.DefaultShmSize
+
+	m := make(map[string]GenericCLIResult)
+	m["add-host"] = stringSliceFromVarlink(opts.AddHost, "add-host", nil)
+	m["annotation"] = stringSliceFromVarlink(opts.Annotation, "annotation", nil)
+	m["attach"] = stringSliceFromVarlink(opts.Attach, "attach", nil)
+	m["blkio-weight"] = stringFromVarlink(opts.BlkioWeight, "blkio-weight", nil)
+	m["blkio-weight-device"] = stringSliceFromVarlink(opts.BlkioWeightDevice, "blkio-weight-device", nil)
+	m["cap-add"] = stringSliceFromVarlink(opts.CapAdd, "cap-add", nil)
+	m["cap-drop"] = stringSliceFromVarlink(opts.CapDrop, "cap-drop", nil)
+	m["cgroup-parent"] = stringFromVarlink(opts.CgroupParent, "cgroup-parent", nil)
+	m["cidfile"] = stringFromVarlink(opts.CidFile, "cidfile", nil)
+	m["conmon-pidfile"] = stringFromVarlink(opts.ConmonPidfile, "conmon-file", nil)
+	m["cpu-period"] = uint64FromVarlink(opts.CpuPeriod, "cpu-period", nil)
+	m["cpu-quota"] = int64FromVarlink(opts.CpuQuota, "quota", nil)
+	m["cpu-rt-period"] = uint64FromVarlink(opts.CpuRtPeriod, "cpu-rt-period", nil)
+	m["cpu-rt-runtime"] = int64FromVarlink(opts.CpuRtRuntime, "cpu-rt-quota", nil)
+	m["cpu-shares"] = uint64FromVarlink(opts.CpuShares, "cpu-shares", nil)
+	m["cpus"] = float64FromVarlink(opts.Cpus, "cpus", nil)
+	m["cpuset-cpus"] = stringFromVarlink(opts.CpuSetCpus, "cpuset-cpus", nil)
+	m["cpuset-mems"] = stringFromVarlink(opts.CpuSetMems, "cpuset-mems", nil)
+	m["detach"] = boolFromVarlink(opts.Detach, "detach", false)
+	m["detach-keys"] = stringFromVarlink(opts.DetachKeys, "detach-keys", nil)
+	m["device"] = stringSliceFromVarlink(opts.Device, "device", nil)
+	m["device-read-bps"] = stringSliceFromVarlink(opts.DeviceReadBps, "device-read-bps", nil)
+	m["device-read-iops"] = stringSliceFromVarlink(opts.DeviceReadIops, "device-read-iops", nil)
+	m["device-write-bps"] = stringSliceFromVarlink(opts.DeviceWriteBps, "write-device-bps", nil)
+	m["device-write-iops"] = stringSliceFromVarlink(opts.DeviceWriteIops, "write-device-iops", nil)
+	m["dns"] = stringSliceFromVarlink(opts.Dns, "dns", nil)
+	m["dns-opt"] = stringSliceFromVarlink(opts.DnsOpt, "dns-opt", nil)
+	m["dns-search"] = stringSliceFromVarlink(opts.DnsSearch, "dns-search", nil)
+	m["entrypoint"] = stringFromVarlink(opts.Entrypoint, "entrypoint", nil)
+	m["env"] = stringArrayFromVarlink(opts.Env, "env", nil)
+	m["env-file"] = stringSliceFromVarlink(opts.EnvFile, "env-file", nil)
+	m["expose"] = stringSliceFromVarlink(opts.Expose, "expose", nil)
+	m["gidmap"] = stringSliceFromVarlink(opts.Gidmap, "gidmap", nil)
+	m["group-add"] = stringSliceFromVarlink(opts.Groupadd, "group-add", nil)
+	m["healthcheck-command"] = stringFromVarlink(opts.HealthcheckCommand, "healthcheck-command", nil)
+	m["healthcheck-interval"] = stringFromVarlink(opts.HealthcheckInterval, "healthcheck-interval", &DefaultHealthCheckInterval)
+	m["healthcheck-retries"] = uintFromVarlink(opts.HealthcheckRetries, "healthcheck-retries", &DefaultHealthCheckRetries)
+	m["healthcheck-start-period"] = stringFromVarlink(opts.HealthcheckStartPeriod, "healthcheck-start-period", &DefaultHealthCheckStartPeriod)
+	m["healthcheck-timeout"] = stringFromVarlink(opts.HealthcheckTimeout, "healthcheck-timeout", &DefaultHealthCheckTimeout)
+	m["hostname"] = stringFromVarlink(opts.Hostname, "hostname", nil)
+	m["image-volume"] = stringFromVarlink(opts.ImageVolume, "image-volume", &DefaultImageVolume)
+	m["init"] = boolFromVarlink(opts.Init, "init", false)
+	m["init-path"] = stringFromVarlink(opts.InitPath, "init-path", nil)
+	m["interactive"] = boolFromVarlink(opts.Interactive, "interactive", false)
+	m["ip"] = stringFromVarlink(opts.Ip, "ip", nil)
+	m["ipc"] = stringFromVarlink(opts.Ipc, "ipc", nil)
+	m["kernel-memory"] = stringFromVarlink(opts.KernelMemory, "kernel-memory", nil)
+	m["label"] = stringArrayFromVarlink(opts.Label, "label", nil)
+	m["label-file"] = stringSliceFromVarlink(opts.LabelFile, "label-file", nil)
+	m["log-driver"] = stringFromVarlink(opts.LogDriver, "log-driver", nil)
+	m["log-opt"] = stringSliceFromVarlink(opts.LogOpt, "log-opt", nil)
+	m["mac-address"] = stringFromVarlink(opts.MacAddress, "mac-address", nil)
+	m["memory"] = stringFromVarlink(opts.Memory, "memory", nil)
+	m["memory-reservation"] = stringFromVarlink(opts.MemoryReservation, "memory-reservation", nil)
+	m["memory-swap"] = stringFromVarlink(opts.MemorySwap, "memory-swap", nil)
+	m["memory-swappiness"] = int64FromVarlink(opts.MemorySwappiness, "memory-swappiness", &memSwapDefault)
+	m["name"] = stringFromVarlink(opts.Name, "name", nil)
+	m["network"] = stringFromVarlink(opts.Network, "network", &netModeDefault)
+	m["no-hosts"] = boolFromVarlink(opts.NoHosts, "no-hosts", false)
+	m["oom-kill-disable"] = boolFromVarlink(opts.OomKillDisable, "oon-kill-disable", false)
+	m["oom-score-adj"] = intFromVarlink(opts.OomScoreAdj, "oom-score-adj", nil)
+	m["override-os"] = stringFromVarlink(opts.OverrideOS, "override-os", nil)
+	m["override-arch"] = stringFromVarlink(opts.OverrideArch, "override-arch", nil)
+	m["pid"] = stringFromVarlink(opts.Pid, "pid", nil)
+	m["pids-limit"] = int64FromVarlink(opts.PidsLimit, "pids-limit", nil)
+	m["pod"] = stringFromVarlink(opts.Pod, "pod", nil)
+	m["privileged"] = boolFromVarlink(opts.Privileged, "privileged", false)
+	m["publish"] = stringSliceFromVarlink(opts.Publish, "publish", nil)
+	m["publish-all"] = boolFromVarlink(opts.PublishAll, "publish-all", false)
+	m["pull"] = stringFromVarlink(opts.Pull, "missing", nil)
+	m["quiet"] = boolFromVarlink(opts.Quiet, "quiet", false)
+	m["read-only"] = boolFromVarlink(opts.Readonly, "read-only", false)
+	m["read-only-tmpfs"] = boolFromVarlink(opts.Readonlytmpfs, "read-only-tmpfs", true)
+	m["restart"] = stringFromVarlink(opts.Restart, "restart", nil)
+	m["rm"] = boolFromVarlink(opts.Rm, "rm", false)
+	m["rootfs"] = boolFromVarlink(opts.Rootfs, "rootfs", false)
+	m["security-opt"] = stringArrayFromVarlink(opts.SecurityOpt, "security-opt", nil)
+	m["shm-size"] = stringFromVarlink(opts.ShmSize, "shm-size", &shmSize)
+	m["stop-signal"] = stringFromVarlink(opts.StopSignal, "stop-signal", nil)
+	m["stop-timeout"] = uintFromVarlink(opts.StopTimeout, "stop-timeout", nil)
+	m["storage-opt"] = stringSliceFromVarlink(opts.StorageOpt, "storage-opt", nil)
+	m["subgidname"] = stringFromVarlink(opts.Subgidname, "subgidname", nil)
+	m["subuidname"] = stringFromVarlink(opts.Subuidname, "subuidname", nil)
+	m["sysctl"] = stringSliceFromVarlink(opts.Sysctl, "sysctl", nil)
+	m["systemd"] = stringFromVarlink(opts.Systemd, "systemd", &systemdDefault)
+	m["tmpfs"] = stringSliceFromVarlink(opts.Tmpfs, "tmpfs", nil)
+	m["tty"] = boolFromVarlink(opts.Tty, "tty", false)
+	m["uidmap"] = stringSliceFromVarlink(opts.Uidmap, "uidmap", nil)
+	m["ulimit"] = stringSliceFromVarlink(opts.Ulimit, "ulimit", nil)
+	m["user"] = stringFromVarlink(opts.User, "user", nil)
+	m["userns"] = stringFromVarlink(opts.Userns, "userns", nil)
+	m["uts"] = stringFromVarlink(opts.Uts, "uts", nil)
+	m["mount"] = stringArrayFromVarlink(opts.Mount, "mount", nil)
+	m["volume"] = stringArrayFromVarlink(opts.Volume, "volume", nil)
+	m["volumes-from"] = stringSliceFromVarlink(opts.VolumesFrom, "volumes-from", nil)
+	m["workdir"] = stringFromVarlink(opts.WorkDir, "workdir", nil)
+
+	gcli := GenericCLIResults{m, opts.Args}
+	return gcli
+}
+
+// Find returns a flag from a GenericCLIResults by name
+func (g GenericCLIResults) Find(name string) GenericCLIResult {
+	result, ok := g.results[name]
+	if ok {
+		return result
+	}
+	panic(errors.Errorf("unable to find generic flag for varlink %s", name))
+}
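The *FromVarlink helpers added above all follow one pattern: a nil pointer means the remote client never sent the flag, so the CR* wrapper carries the default with Changed = false; a non-nil pointer carries the client's value with Changed = true, which is how the server side can tell an explicitly set flag from a fallback. A minimal, self-contained sketch of that behavior (the trimmed-down CRString type and the sample values below are illustrative only, not code from this commit):

package main

import "fmt"

// Stand-in for the CRString type and stringFromVarlink helper in the diff above.
type CRString struct {
	Val     string
	Changed bool
	Flag    string
}

func stringFromVarlink(v *string, flagName string, defaultValue *string) CRString {
	cr := CRString{Flag: flagName}
	if v == nil {
		// Flag never sent over varlink: fall back to the default, leave Changed false.
		if defaultValue != nil {
			cr.Val = *defaultValue
		}
		return cr
	}
	// Flag sent by the client: take its value and mark it as changed.
	cr.Val = *v
	cr.Changed = true
	return cr
}

func main() {
	def := "bridge"
	fmt.Println(stringFromVarlink(nil, "network", &def)) // {bridge false network}
	host := "host"
	fmt.Println(stringFromVarlink(&host, "network", &def)) // {host true network}
}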
diff --git a/pkg/varlinkapi/pods.go b/pkg/varlinkapi/pods.go
index 94add1b6c..5a9360447 100644
--- a/pkg/varlinkapi/pods.go
+++ b/pkg/varlinkapi/pods.go
@@ -5,11 +5,14 @@ package varlinkapi
 import (
 	"encoding/json"
 	"fmt"
+	"strconv"
 	"syscall"
 
-	"github.com/containers/libpod/cmd/podman/shared"
+	"github.com/cri-o/ocicni/pkg/ocicni"
+	"github.com/docker/go-connections/nat"
+	"github.com/pkg/errors"
+
 	"github.com/containers/libpod/libpod"
-	"github.com/containers/libpod/pkg/adapter/shortcuts"
 	iopodman "github.com/containers/libpod/pkg/varlink"
 )
@@ -18,7 +21,7 @@ func (i *VarlinkAPI) CreatePod(call iopodman.VarlinkCall, create iopodman.PodCre
 	var options []libpod.PodCreateOption
 	if create.Infra {
 		options = append(options, libpod.WithInfraContainer())
-		nsOptions, err := shared.GetNamespaceOptions(create.Share)
+		nsOptions, err := GetNamespaceOptions(create.Share)
 		if err != nil {
 			return err
 		}
@@ -44,7 +47,7 @@ func (i *VarlinkAPI) CreatePod(call iopodman.VarlinkCall, create iopodman.PodCre
 		if !create.Infra {
 			return call.ReplyErrorOccurred("you must have an infra container to publish port bindings to the host")
 		}
-		portBindings, err := shared.CreatePortBindings(create.Publish)
+		portBindings, err := CreatePortBindings(create.Publish)
 		if err != nil {
 			return call.ReplyErrorOccurred(err.Error())
 		}
@@ -70,7 +73,7 @@ func (i *VarlinkAPI) ListPods(call iopodman.VarlinkCall) error {
 	if err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
-	opts := shared.PsOptions{}
+	opts := PsOptions{}
 	for _, pod := range pods {
 		listPod, err := makeListPod(pod, opts)
 		if err != nil {
@@ -87,7 +90,7 @@ func (i *VarlinkAPI) GetPod(call iopodman.VarlinkCall, name string) error {
 	if err != nil {
 		return call.ReplyPodNotFound(name, err.Error())
 	}
-	opts := shared.PsOptions{}
+	opts := PsOptions{}
 
 	listPod, err := makeListPod(pod, opts)
 	if err != nil {
@@ -100,7 +103,7 @@ func (i *VarlinkAPI) GetPod(call iopodman.VarlinkCall, name string) error {
 // GetPodsByStatus returns a slice of pods filtered by a libpod status
 func (i *VarlinkAPI) GetPodsByStatus(call iopodman.VarlinkCall, statuses []string) error {
 	filterFuncs := func(p *libpod.Pod) bool {
-		state, _ := shared.GetPodStatus(p)
+		state, _ := p.GetPodStatus()
 		for _, status := range statuses {
 			if state == status {
 				return true
@@ -290,11 +293,11 @@ func (i *VarlinkAPI) GetPodStats(call iopodman.VarlinkCall, name string) error {
 	return call.ReplyGetPodStats(pod.ID(), containersStats)
 }
 
-// GetPodsByContext returns a slice of pod ids based on all, latest, or a list
+// getPodsByContext returns a slice of pod ids based on all, latest, or a list
 func (i *VarlinkAPI) GetPodsByContext(call iopodman.VarlinkCall, all, latest bool, input []string) error {
 	var podids []string
 
-	pods, err := shortcuts.GetPodsByContext(all, latest, input, i.Runtime)
+	pods, err := getPodsByContext(all, latest, input, i.Runtime)
 	if err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
@@ -337,7 +340,7 @@ func (i *VarlinkAPI) TopPod(call iopodman.VarlinkCall, name string, latest bool,
 		return call.ReplyPodNotFound(name, err.Error())
 	}
 
-	podStatus, err := shared.GetPodStatus(pod)
+	podStatus, err := pod.GetPodStatus()
 	if err != nil {
 		return call.ReplyErrorOccurred(fmt.Sprintf("unable to get status for pod %s", pod.ID()))
 	}
@@ -350,3 +353,36 @@ func (i *VarlinkAPI) TopPod(call iopodman.VarlinkCall, name string, latest bool,
 	}
 	return call.ReplyTopPod(reply)
 }
+
+// CreatePortBindings iterates ports mappings and exposed ports into a format CNI understands
+func CreatePortBindings(ports []string) ([]ocicni.PortMapping, error) {
+	var portBindings []ocicni.PortMapping
+	// The conversion from []string to natBindings is temporary while mheon reworks the port
+	// deduplication code. Eventually that step will not be required.
+	_, natBindings, err := nat.ParsePortSpecs(ports)
+	if err != nil {
+		return nil, err
+	}
+	for containerPb, hostPb := range natBindings {
+		var pm ocicni.PortMapping
+		pm.ContainerPort = int32(containerPb.Int())
+		for _, i := range hostPb {
+			var hostPort int
+			var err error
+			pm.HostIP = i.HostIP
+			if i.HostPort == "" {
+				hostPort = containerPb.Int()
+			} else {
+				hostPort, err = strconv.Atoi(i.HostPort)
+				if err != nil {
+					return nil, errors.Wrapf(err, "unable to convert host port to integer")
+				}
+			}
+
+			pm.HostPort = int32(hostPort)
+			pm.Protocol = containerPb.Proto()
+			portBindings = append(portBindings, pm)
+		}
+	}
+	return portBindings, nil
+}
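CreatePortBindings leans on nat.ParsePortSpecs from docker/go-connections to do the parsing and then flattens each host binding into an ocicni.PortMapping. A small sketch of what the parsing step yields for a typical publish spec (the port values are made up for illustration; nat.ParsePortSpecs is the only real call used here):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "8080:80/tcp" publishes container port 80/tcp on host port 8080.
	_, bindings, err := nat.ParsePortSpecs([]string{"8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for containerPort, hostBindings := range bindings {
		for _, hb := range hostBindings {
			// CreatePortBindings copies these values into an ocicni.PortMapping:
			// ContainerPort=80, HostPort=8080, Protocol="tcp", HostIP="".
			fmt.Println(containerPort.Int(), containerPort.Proto(), hb.HostIP, hb.HostPort)
		}
	}
}

An empty HostPort falls back to the container port itself, which is the branch handled by the `if i.HostPort == ""` check above.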
diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/varlinkapi/shortcuts.go
index 8a8459c6c..771129404 100644
--- a/pkg/adapter/shortcuts/shortcuts.go
+++ b/pkg/varlinkapi/shortcuts.go
@@ -1,13 +1,13 @@
-package shortcuts
+package varlinkapi
 
 import (
 	"github.com/containers/libpod/libpod"
 	"github.com/sirupsen/logrus"
 )
 
-// GetPodsByContext returns a slice of pods. Note that all, latest and pods are
+// getPodsByContext returns a slice of pods. Note that all, latest and pods are
 // mutually exclusive arguments.
-func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
+func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
 	var outpods []*libpod.Pod
 	if all {
 		return runtime.GetAllPods()
@@ -36,9 +36,9 @@ func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
 	return outpods, err
 }
 
-// GetContainersByContext gets pods whether all, latest, or a slice of names/ids
+// getContainersByContext gets pods whether all, latest, or a slice of names/ids
 // is specified.
-func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
+func getContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
 	var ctr *libpod.Container
 	ctrs = []*libpod.Container{}
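getPodsByContext and getContainersByContext implement the same precedence: all wins, then latest, otherwise the given names are looked up one by one, keeping whatever resolves and remembering a lookup error for the caller. A simplified, self-contained sketch of that rule (selectByContext and the sample data are invented for illustration; the real helpers operate on a *libpod.Runtime):

package main

import (
	"errors"
	"fmt"
)

// selectByContext mirrors the all > latest > explicit-names precedence against
// a toy name->ID map instead of a libpod runtime.
func selectByContext(all, latest bool, names []string, known map[string]string, newest string) ([]string, error) {
	if all {
		ids := make([]string, 0, len(known))
		for _, id := range known {
			ids = append(ids, id)
		}
		return ids, nil
	}
	if latest {
		return []string{newest}, nil
	}
	var ids []string
	var err error
	for _, name := range names {
		id, ok := known[name]
		if !ok {
			// Keep going, but remember the failure for the caller.
			err = errors.New("no such pod " + name)
			continue
		}
		ids = append(ids, id)
	}
	return ids, err
}

func main() {
	known := map[string]string{"web": "abc123", "db": "def456"}
	fmt.Println(selectByContext(false, false, []string{"web", "missing"}, known, "def456"))
}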
diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go
index 6b196f384..f73e77249 100644
--- a/pkg/varlinkapi/util.go
+++ b/pkg/varlinkapi/util.go
@@ -9,7 +9,6 @@ import (
 	"time"
 
 	"github.com/containers/buildah"
-	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/pkg/channelwriter"
@@ -22,12 +21,12 @@ func getContext() context.Context {
 	return context.TODO()
 }
 
-func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct) iopodman.Container {
+func makeListContainer(containerID string, batchInfo BatchContainerStruct) iopodman.Container {
 	var (
 		mounts []iopodman.ContainerMount
 		ports []iopodman.ContainerPortMappings
 	)
-	ns := shared.GetNamespaces(batchInfo.Pid)
+	ns := GetNamespaces(batchInfo.Pid)
 
 	for _, mount := range batchInfo.ConConfig.Spec.Mounts {
 		m := iopodman.ContainerMount{
@@ -85,7 +84,7 @@ func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct
 	return lc
 }
 
-func makeListPodContainers(containerID string, batchInfo shared.BatchContainerStruct) iopodman.ListPodContainerInfo {
+func makeListPodContainers(containerID string, batchInfo BatchContainerStruct) iopodman.ListPodContainerInfo {
 	lc := iopodman.ListPodContainerInfo{
 		Id: containerID,
 		Status: batchInfo.ConState.String(),
@@ -94,10 +93,10 @@ func makeListPodContainers(containerID string, batchInfo shared.BatchContainerSt
 	return lc
 }
 
-func makeListPod(pod *libpod.Pod, batchInfo shared.PsOptions) (iopodman.ListPodData, error) {
+func makeListPod(pod *libpod.Pod, batchInfo PsOptions) (iopodman.ListPodData, error) {
 	var listPodsContainers []iopodman.ListPodContainerInfo
 	var errPodData = iopodman.ListPodData{}
-	status, err := shared.GetPodStatus(pod)
+	status, err := pod.GetPodStatus()
 	if err != nil {
 		return errPodData, err
 	}
@@ -106,7 +105,7 @@ func makeListPod(pod *libpod.Pod, batchInfo shared.PsOptions) (iopodman.ListPodD
 		return errPodData, err
 	}
 	for _, ctr := range containers {
-		batchInfo, err := shared.BatchContainerOp(ctr, batchInfo)
+		batchInfo, err := BatchContainerOp(ctr, batchInfo)
 		if err != nil {
 			return errPodData, err
 		}
@@ -179,13 +178,13 @@ func derefString(in *string) string {
 	return *in
 }
 
-func makePsOpts(inOpts iopodman.PsOpts) shared.PsOptions {
+func makePsOpts(inOpts iopodman.PsOpts) PsOptions {
 	last := 0
 	if inOpts.Last != nil {
 		lastT := *inOpts.Last
 		last = int(lastT)
 	}
-	return shared.PsOptions{
+	return PsOptions{
 		All: inOpts.All,
 		Last: last,
 		Latest: derefBool(inOpts.Latest),
diff --git a/pkg/varlinkapi/volumes.go b/pkg/varlinkapi/volumes.go
index ff72c3869..aa0eb1fb5 100644
--- a/pkg/varlinkapi/volumes.go
+++ b/pkg/varlinkapi/volumes.go
@@ -3,10 +3,11 @@ package varlinkapi
 import (
+	"context"
 	"encoding/json"
 
-	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/domain/infra/abi/parse"
 	iopodman "github.com/containers/libpod/pkg/varlink"
 )
@@ -24,7 +25,7 @@ func (i *VarlinkAPI) VolumeCreate(call iopodman.VarlinkCall, options iopodman.Vo
 		volumeOptions = append(volumeOptions, libpod.WithVolumeLabels(options.Labels))
 	}
 	if len(options.Options) > 0 {
-		parsedOptions, err := shared.ParseVolumeOptions(options.Options)
+		parsedOptions, err := parse.ParseVolumeOptions(options.Options)
 		if err != nil {
 			return call.ReplyErrorOccurred(err.Error())
 		}
@@ -39,7 +40,7 @@ func (i *VarlinkAPI) VolumeCreate(call iopodman.VarlinkCall, options iopodman.Vo
 
 // VolumeRemove removes volumes by options.All or options.Volumes
 func (i *VarlinkAPI) VolumeRemove(call iopodman.VarlinkCall, options iopodman.VolumeRemoveOpts) error {
-	success, failed, err := shared.SharedRemoveVolumes(getContext(), i.Runtime, options.Volumes, options.All, options.Force)
+	success, failed, err := SharedRemoveVolumes(getContext(), i.Runtime, options.Volumes, options.All, options.Force)
 	if err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
@@ -122,3 +123,43 @@ func (i *VarlinkAPI) VolumesPrune(call iopodman.VarlinkCall) error {
 	}
 	return call.ReplyVolumesPrune(prunedNames, prunedErrors)
 }
+
+// Remove given set of volumes
+func SharedRemoveVolumes(ctx context.Context, runtime *libpod.Runtime, vols []string, all, force bool) ([]string, map[string]error, error) {
+	var (
+		toRemove []*libpod.Volume
+		success []string
+		failed map[string]error
+	)
+
+	failed = make(map[string]error)
+
+	if all {
+		vols, err := runtime.Volumes()
+		if err != nil {
+			return nil, nil, err
+		}
+		toRemove = vols
+	} else {
+		for _, v := range vols {
+			vol, err := runtime.LookupVolume(v)
+			if err != nil {
+				failed[v] = err
+				continue
+			}
+			toRemove = append(toRemove, vol)
+		}
+	}
+
+	// We could parallelize this, but I haven't heard anyone complain about
+	// performance here yet, so hold off.
+	for _, vol := range toRemove {
+		if err := runtime.RemoveVolume(ctx, vol, force); err != nil {
+			failed[vol.Name()] = err
+			continue
+		}
+		success = append(success, vol.Name())
+	}
+
+	return success, failed, nil
+}
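SharedRemoveVolumes deliberately returns three things: the names it removed, a per-volume error map, and a hard error for failures that abort the whole operation (such as listing volumes). A sketch of the collect-successes/collect-failures pattern a caller like VolumeRemove ends up consuming (removeVolume and the volume names below are stand-ins, not libpod calls):

package main

import (
	"errors"
	"fmt"
)

// removeVolume stands in for runtime.RemoveVolume; "busy" simulates a volume
// that cannot be removed.
func removeVolume(name string) error {
	if name == "busy" {
		return errors.New("volume is in use")
	}
	return nil
}

// removeAll mirrors the success-slice plus per-name failure-map shape used by
// SharedRemoveVolumes above.
func removeAll(names []string) (success []string, failed map[string]error) {
	failed = make(map[string]error)
	for _, n := range names {
		if err := removeVolume(n); err != nil {
			failed[n] = err
			continue
		}
		success = append(success, n)
	}
	return success, failed
}

func main() {
	ok, bad := removeAll([]string{"cache", "busy"})
	fmt.Println("removed:", ok)
	for name, err := range bad {
		fmt.Printf("failed %s: %v\n", name, err)
	}
}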