// +build linux

package libpod

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"text/template"
	"time"

	"github.com/containers/common/pkg/config"
	conmonConfig "github.com/containers/conmon/runner/config"
	"github.com/containers/podman/v2/libpod/define"
	"github.com/containers/podman/v2/libpod/logs"
	"github.com/containers/podman/v2/pkg/cgroups"
	"github.com/containers/podman/v2/pkg/errorhandling"
	"github.com/containers/podman/v2/pkg/lookup"
	"github.com/containers/podman/v2/pkg/rootless"
	"github.com/containers/podman/v2/pkg/util"
	"github.com/containers/podman/v2/utils"
	"github.com/containers/storage/pkg/homedir"
	pmount "github.com/containers/storage/pkg/mount"
	"github.com/coreos/go-systemd/v22/activation"
	"github.com/coreos/go-systemd/v22/daemon"
	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/selinux/go-selinux"
	"github.com/opencontainers/selinux/go-selinux/label"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
	"k8s.io/client-go/tools/remotecommand"
)

const (
	// This is Conmon's STDIO_BUF_SIZE. I don't believe we have access to it
	// directly from the Go code, so const it here
	bufferSize = conmonConfig.BufSize
)

// ConmonOCIRuntime is an OCI runtime managed by Conmon.
// TODO: Make all calls to OCI runtime have a timeout.
type ConmonOCIRuntime struct {
	name              string
	path              string
	conmonPath        string
	conmonEnv         []string
	tmpDir            string
	exitsDir          string
	socketsDir        string
	logSizeMax        int64
	noPivot           bool
	reservePorts      bool
	runtimeFlags      []string
	supportsJSON      bool
	supportsKVM       bool
	supportsNoCgroups bool
	sdNotify          bool
	enableKeyring     bool
}

// Make a new Conmon-based OCI runtime with the given options.
// Conmon will wrap the given OCI runtime, which can be `runc`, `crun`, or
// any runtime with a runc-compatible CLI.
// The first path that points to a valid executable will be used.
// Deliberately private. Someone should not be able to construct this outside of
// libpod.
func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
	if name == "" {
		return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
	}

	// Make lookup tables for runtime support
	supportsJSON := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsJSON))
	supportsNoCgroups := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsNoCgroups))
	supportsKVM := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsKVM))
	for _, r := range runtimeCfg.Engine.RuntimeSupportsJSON {
		supportsJSON[r] = true
	}
	for _, r := range runtimeCfg.Engine.RuntimeSupportsNoCgroups {
		supportsNoCgroups[r] = true
	}
	for _, r := range runtimeCfg.Engine.RuntimeSupportsKVM {
		supportsKVM[r] = true
	}

	runtime := new(ConmonOCIRuntime)
	runtime.name = name
	runtime.conmonPath = conmonPath
	runtime.runtimeFlags = runtimeFlags
	runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars
	runtime.tmpDir = runtimeCfg.Engine.TmpDir
	runtime.logSizeMax = runtimeCfg.Containers.LogSizeMax
	runtime.noPivot = runtimeCfg.Engine.NoPivotRoot
	runtime.reservePorts = runtimeCfg.Engine.EnablePortReservation
	runtime.sdNotify = runtimeCfg.Engine.SDNotify
	runtime.enableKeyring = runtimeCfg.Containers.EnableKeyring

	// TODO: probe OCI runtime for feature and enable automatically if
	// available.
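	// The supports* capability flags are looked up by the configured runtime
	// name (for example "runc" or "crun") in the tables built above.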
runtime.supportsJSON = supportsJSON[name] runtime.supportsNoCgroups = supportsNoCgroups[name] runtime.supportsKVM = supportsKVM[name] foundPath := false for _, path := range paths { stat, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { continue } return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name) } if !stat.Mode().IsRegular() { continue } foundPath = true runtime.path = path logrus.Debugf("using runtime %q", path) break } // Search the $PATH as last fallback if !foundPath { if foundRuntime, err := exec.LookPath(name); err == nil { foundPath = true runtime.path = foundRuntime logrus.Debugf("using runtime %q from $PATH: %q", name, foundRuntime) } } if !foundPath { return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name) } runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") // Create the exit files and attach sockets directories if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory") } } if err := os.MkdirAll(runtime.socketsDir, 0750); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return nil, errors.Wrap(err, "error creating OCI runtime attach sockets directory") } } return runtime, nil } // Name returns the name of the runtime being wrapped by Conmon. func (r *ConmonOCIRuntime) Name() string { return r.name } // Path returns the path of the OCI runtime being wrapped by Conmon. func (r *ConmonOCIRuntime) Path() string { return r.path } // hasCurrentUserMapped checks whether the current user is mapped inside the container user namespace func hasCurrentUserMapped(ctr *Container) bool { if len(ctr.config.IDMappings.UIDMap) == 0 && len(ctr.config.IDMappings.GIDMap) == 0 { return true } uid := os.Geteuid() for _, m := range ctr.config.IDMappings.UIDMap { if uid >= m.HostID && uid < m.HostID+m.Size { return true } } return false } // CreateContainer creates a container. func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { if !hasCurrentUserMapped(ctr) { for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} { if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil { return err } } // if we are running a non privileged container, be sure to umount some kernel paths so they are not // bind mounted inside the container at all. if !ctr.config.Privileged && !rootless.IsRootless() { ch := make(chan error) go func() { runtime.LockOSThread() err := func() error { fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid())) if err != nil { return err } defer errorhandling.CloseQuiet(fd) // create a new mountns on the current thread if err = unix.Unshare(unix.CLONE_NEWNS); err != nil { return err } defer func() { if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil { logrus.Errorf("unable to clone new namespace: %q", err) } }() // don't spread our mounts around. We are setting only /sys to be slave // so that the cleanup process is still able to umount the storage and the // changes are propagated to the host. 
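				// MS_REC|MS_SLAVE makes /sys (recursively) a slave mount:
				// mount events still propagate from the host into this new
				// namespace, but the unmounts performed below do not
				// propagate back to the host.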
err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "") if err != nil { return errors.Wrapf(err, "cannot make /sys slave") } mounts, err := pmount.GetMounts() if err != nil { return err } for _, m := range mounts { if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") { continue } err = unix.Unmount(m.Mountpoint, 0) if err != nil && !os.IsNotExist(err) { return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint) } } return r.createOCIContainer(ctr, restoreOptions) }() ch <- err }() err := <-ch return err } } return r.createOCIContainer(ctr, restoreOptions) } // UpdateContainerStatus retrieves the current status of the container from the // runtime. It updates the container's state but does not save it. // If useRuntime is false, we will not directly hit runc to see the container's // status, but will instead only check for the existence of the conmon exit file // and update state to stopped if it exists. func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { exitFile, err := r.ExitFilePath(ctr) if err != nil { return err } runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } // Store old state so we know if we were already stopped oldState := ctr.state.State state := new(spec.State) cmd := exec.Command(r.path, "state", ctr.ID()) cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) outPipe, err := cmd.StdoutPipe() if err != nil { return errors.Wrapf(err, "getting stdout pipe") } errPipe, err := cmd.StderrPipe() if err != nil { return errors.Wrapf(err, "getting stderr pipe") } if err := cmd.Start(); err != nil { out, err2 := ioutil.ReadAll(errPipe) if err2 != nil { return errors.Wrapf(err, "error getting container %s state", ctr.ID()) } if strings.Contains(string(out), "does not exist") { if err := ctr.removeConmonFiles(); err != nil { logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) } ctr.state.ExitCode = -1 ctr.state.FinishedTime = time.Now() ctr.state.State = define.ContainerStateExited return nil } return errors.Wrapf(err, "error getting container %s state. 
stderr/out: %s", ctr.ID(), out) } defer func() { _ = cmd.Wait() }() if err := errPipe.Close(); err != nil { return err } out, err := ioutil.ReadAll(outPipe) if err != nil { return errors.Wrapf(err, "error reading stdout: %s", ctr.ID()) } if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil { return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID()) } ctr.state.PID = state.Pid switch state.Status { case "created": ctr.state.State = define.ContainerStateCreated case "paused": ctr.state.State = define.ContainerStatePaused case "running": ctr.state.State = define.ContainerStateRunning case "stopped": ctr.state.State = define.ContainerStateStopped default: return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s", ctr.ID(), state.Status) } // Only grab exit status if we were not already stopped // If we were, it should already be in the database if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped { var fi os.FileInfo chWait := make(chan error) defer close(chWait) _, err := WaitForFile(exitFile, chWait, time.Second*5) if err == nil { fi, err = os.Stat(exitFile) } if err != nil { ctr.state.ExitCode = -1 ctr.state.FinishedTime = time.Now() logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err) return nil } return ctr.handleExitFile(exitFile, fi) } return nil } // StartContainer starts the given container. // Sets time the container was started, but does not save it. func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { // TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers? runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} if ctr.config.SdNotifyMode == define.SdNotifyModeContainer { if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) } } if path, ok := os.LookupEnv("PATH"); ok { env = append(env, fmt.Sprintf("PATH=%s", path)) } if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "start", ctr.ID())...); err != nil { return err } ctr.state.StartedTime = time.Now() return nil } // KillContainer sends the given signal to the given container. // If all is set, send to all PIDs in the container. // All is only supported if the container created cgroups. func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) error { logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID()) runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} var args []string args = append(args, r.runtimeFlags...) if all { args = append(args, "kill", "--all", ctr.ID(), fmt.Sprintf("%d", signal)) } else { args = append(args, "kill", ctr.ID(), fmt.Sprintf("%d", signal)) } if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil { return errors.Wrapf(err, "error sending signal to container %s", ctr.ID()) } return nil } // StopContainer stops a container, first using its given stop signal (or // SIGTERM if no signal was specified), then using SIGKILL. // Timeout is given in seconds. If timeout is 0, the container will be // immediately kill with SIGKILL. // Does not set finished time for container, assumes you will run updateStatus // after to pull the exit code. 
func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) error { logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID) // Ping the container to see if it's alive // If it's not, it's already stopped, return err := unix.Kill(ctr.state.PID, 0) if err == unix.ESRCH { return nil } stopSignal := ctr.config.StopSignal if stopSignal == 0 { stopSignal = uint(syscall.SIGTERM) } if timeout > 0 { if err := r.KillContainer(ctr, stopSignal, all); err != nil { // Is the container gone? // If so, it probably died between the first check and // our sending the signal // The container is stopped, so exit cleanly err := unix.Kill(ctr.state.PID, 0) if err == unix.ESRCH { return nil } return err } if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { logrus.Infof("Timed out stopping container %s, resorting to SIGKILL: %v", ctr.ID(), err) } else { // No error, the container is dead return nil } } if err := r.KillContainer(ctr, 9, all); err != nil { // Again, check if the container is gone. If it is, exit cleanly. err := unix.Kill(ctr.state.PID, 0) if err == unix.ESRCH { return nil } return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID()) } // Give runtime a few seconds to make it happen if err := waitContainerStop(ctr, killContainerTimeout); err != nil { return err } return nil } // DeleteContainer deletes a container from the OCI runtime. func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error { runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "delete", "--force", ctr.ID())...) } // PauseContainer pauses the given container. func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error { runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "pause", ctr.ID())...) } // UnpauseContainer unpauses the given container. func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error { runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "resume", ctr.ID())...) } // HTTPAttach performs an attach for the HTTP API. // The caller must handle closing the HTTP connection after this returns. // The cancel channel is not closed; it is up to the caller to do so after // this function returns. // If this is a container with a terminal, we will stream raw. If it is not, we // will stream with an 8-byte header to multiplex STDOUT and STDERR. // Returns any errors that occurred, and whether the connection was successfully // hijacked before that error occurred. 
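// In the non-terminal case every frame is prefixed with the Docker-style
// 8-byte header: byte 0 is the stream (0 stdin, 1 stdout, 2 stderr), bytes
// 1-3 are zero, and bytes 4-7 hold the payload length in big-endian order;
// for example, a 13-byte stdout payload is framed as
// {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d}.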
func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) (deferredErr error) { isTerminal := false if ctr.config.Spec.Process != nil { isTerminal = ctr.config.Spec.Process.Terminal } if streams != nil { if !streams.Stdin && !streams.Stdout && !streams.Stderr { return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to") } } attachSock, err := r.AttachSocketPath(ctr) if err != nil { return err } socketPath := buildSocketPath(attachSock) var conn *net.UnixConn if streamAttach { newConn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) if err != nil { return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath) } conn = newConn defer func() { if err := conn.Close(); err != nil { logrus.Errorf("unable to close container %s attach socket: %q", ctr.ID(), err) } }() logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath) } detachString := ctr.runtime.config.Engine.DetachKeys if detachKeys != nil { detachString = *detachKeys } detach, err := processDetachKeys(detachString) if err != nil { return err } attachStdout := true attachStderr := true attachStdin := true if streams != nil { attachStdout = streams.Stdout attachStderr = streams.Stderr attachStdin = streams.Stdin } logrus.Debugf("Going to hijack container %s attach connection", ctr.ID()) // Alright, let's hijack. hijacker, ok := w.(http.Hijacker) if !ok { return errors.Errorf("unable to hijack connection") } httpCon, httpBuf, err := hijacker.Hijack() if err != nil { return errors.Wrapf(err, "error hijacking connection") } hijackDone <- true writeHijackHeader(req, httpBuf) // Force a flush after the header is written. if err := httpBuf.Flush(); err != nil { return errors.Wrapf(err, "error flushing HTTP hijack header") } defer func() { hijackWriteErrorAndClose(deferredErr, ctr.ID(), isTerminal, httpCon, httpBuf) }() logrus.Debugf("Hijack for container %s attach session done, ready to stream", ctr.ID()) // TODO: This is gross. Really, really gross. // I want to say we should read all the logs into an array before // calling this, in container_api.go, but that could take a lot of // memory... // On the whole, we need to figure out a better way of doing this, // though. logSize := 0 if streamLogs { logrus.Debugf("Will stream logs for container %s attach session", ctr.ID()) // Get all logs for the container logChan := make(chan *logs.LogLine) logOpts := new(logs.LogOptions) logOpts.Tail = -1 logOpts.WaitGroup = new(sync.WaitGroup) errChan := make(chan error) go func() { var err error // In non-terminal mode we need to prepend with the // stream header. 
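			// Each log line is framed on its own: header (non-terminal only),
			// the message bytes, then a trailing newline, with an explicit
			// flush so the client sees output as soon as it is replayed.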
logrus.Debugf("Writing logs for container %s to HTTP attach", ctr.ID()) for logLine := range logChan { if !isTerminal { device := logLine.Device var header []byte headerLen := uint32(len(logLine.Msg)) logSize += len(logLine.Msg) switch strings.ToLower(device) { case "stdin": header = makeHTTPAttachHeader(0, headerLen) case "stdout": header = makeHTTPAttachHeader(1, headerLen) case "stderr": header = makeHTTPAttachHeader(2, headerLen) default: logrus.Errorf("Unknown device for log line: %s", device) header = makeHTTPAttachHeader(1, headerLen) } _, err = httpBuf.Write(header) if err != nil { break } } _, err = httpBuf.Write([]byte(logLine.Msg)) if err != nil { break } _, err = httpBuf.Write([]byte("\n")) if err != nil { break } err = httpBuf.Flush() if err != nil { break } } errChan <- err }() go func() { logOpts.WaitGroup.Wait() close(logChan) }() if err := ctr.ReadLog(context.Background(), logOpts, logChan); err != nil { return err } logrus.Debugf("Done reading logs for container %s, %d bytes", ctr.ID(), logSize) if err := <-errChan; err != nil { return err } } if !streamAttach { logrus.Debugf("Done streaming logs for container %s attach, exiting as attach streaming not requested", ctr.ID()) return nil } logrus.Debugf("Forwarding attach output for container %s", ctr.ID()) stdoutChan := make(chan error) stdinChan := make(chan error) // Handle STDOUT/STDERR go func() { var err error if isTerminal { // Hack: return immediately if attachStdout not set to // emulate Docker. // Basically, when terminal is set, STDERR goes nowhere. // Everything does over STDOUT. // Therefore, if not attaching STDOUT - we'll never copy // anything from here. logrus.Debugf("Performing terminal HTTP attach for container %s", ctr.ID()) if attachStdout { err = httpAttachTerminalCopy(conn, httpBuf, ctr.ID()) } } else { logrus.Debugf("Performing non-terminal HTTP attach for container %s", ctr.ID()) err = httpAttachNonTerminalCopy(conn, httpBuf, ctr.ID(), attachStdin, attachStdout, attachStderr) } stdoutChan <- err logrus.Debugf("STDOUT/ERR copy completed") }() // Next, STDIN. Avoid entirely if attachStdin unset. if attachStdin { go func() { _, err := utils.CopyDetachable(conn, httpBuf, detach) logrus.Debugf("STDIN copy completed") stdinChan <- err }() } for { select { case err := <-stdoutChan: if err != nil { return err } return nil case err := <-stdinChan: if err != nil { return err } case <-cancel: return nil } } } // isRetryable returns whether the error was caused by a blocked syscall or the // specified operation on a non blocking file descriptor wasn't ready for completion. func isRetryable(err error) bool { if errno, isErrno := errors.Cause(err).(syscall.Errno); isErrno { return errno == syscall.EINTR || errno == syscall.EAGAIN } return false } // openControlFile opens the terminal control file. func openControlFile(ctr *Container, parentDir string) (*os.File, error) { controlPath := filepath.Join(parentDir, "ctl") for i := 0; i < 600; i++ { controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY|unix.O_NONBLOCK, 0) if err == nil { return controlFile, err } if !isRetryable(err) { return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID()) } time.Sleep(time.Second / 10) } return nil, errors.Errorf("timeout waiting for %q", controlPath) } // AttachResize resizes the terminal used by the given container. 
func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error { controlFile, err := openControlFile(ctr, ctr.bundlePath()) if err != nil { return err } defer controlFile.Close() logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize) if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil { return errors.Wrapf(err, "failed to write to ctl file to resize terminal") } return nil } // CheckpointContainer checkpoints the given container. func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { return err } // imagePath is used by CRIU to store the actual checkpoint files imagePath := ctr.CheckpointPath() // workPath will be used to store dump.log and stats-dump workPath := ctr.bundlePath() logrus.Debugf("Writing checkpoint to %s", imagePath) logrus.Debugf("Writing checkpoint logs to %s", workPath) args := []string{} args = append(args, r.runtimeFlags...) args = append(args, "checkpoint") args = append(args, "--image-path") args = append(args, imagePath) args = append(args, "--work-path") args = append(args, workPath) if options.KeepRunning { args = append(args, "--leave-running") } if options.TCPEstablished { args = append(args, "--tcp-established") } runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } if err = os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil { return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") } args = append(args, ctr.ID()) return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...) } func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) { if ctr.state.ConmonPID == 0 { // If the container is running or paused, assume Conmon is // running. We didn't record Conmon PID on some old versions, so // that is likely what's going on... // Unusual enough that we should print a warning message though. if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { logrus.Warnf("Conmon PID is not set, but container is running!") return true, nil } // Container's not running, so conmon PID being unset is // expected. Conmon is not running. return false, nil } // We have a conmon PID. Ping it with signal 0. if err := unix.Kill(ctr.state.ConmonPID, 0); err != nil { if err == unix.ESRCH { return false, nil } return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID()) } return true, nil } // SupportsCheckpoint checks if the OCI runtime supports checkpointing // containers. func (r *ConmonOCIRuntime) SupportsCheckpoint() bool { // Check if the runtime implements checkpointing. Currently only // runc's checkpoint/restore implementation is supported. cmd := exec.Command(r.path, "checkpoint", "--help") if err := cmd.Start(); err != nil { return false } if err := cmd.Wait(); err == nil { return true } return false } // SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error // messages. func (r *ConmonOCIRuntime) SupportsJSONErrors() bool { return r.supportsJSON } // SupportsNoCgroups checks if the OCI runtime supports running containers // without cgroups (the --cgroup-manager=disabled flag). 
func (r *ConmonOCIRuntime) SupportsNoCgroups() bool { return r.supportsNoCgroups } // SupportsKVM checks if the OCI runtime supports running containers // without KVM separation func (r *ConmonOCIRuntime) SupportsKVM() bool { return r.supportsKVM } // AttachSocketPath is the path to a single container's attach socket. func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) { if ctr == nil { return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path") } return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil } // ExitFilePath is the path to a container's exit file. func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) { if ctr == nil { return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path") } return filepath.Join(r.exitsDir, ctr.ID()), nil } // RuntimeInfo provides information on the runtime. func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error) { runtimePackage := packageVersion(r.path) conmonPackage := packageVersion(r.conmonPath) runtimeVersion, err := r.getOCIRuntimeVersion() if err != nil { return nil, nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name) } conmonVersion, err := r.getConmonVersion() if err != nil { return nil, nil, errors.Wrapf(err, "error getting conmon version") } conmon := define.ConmonInfo{ Package: conmonPackage, Path: r.conmonPath, Version: conmonVersion, } ocirt := define.OCIRuntimeInfo{ Name: r.name, Path: r.path, Package: runtimePackage, Version: runtimeVersion, } return &conmon, &ocirt, nil } // makeAccessible changes the path permission and each parent directory to have --x--x--x func makeAccessible(path string, uid, gid int) error { for ; path != "/"; path = filepath.Dir(path) { st, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return nil } return err } if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid { continue } if st.Mode()&0111 != 0111 { if err := os.Chmod(path, st.Mode()|0111); err != nil { return err } } } return nil } // Wait for a container which has been sent a signal to stop func waitContainerStop(ctr *Container, timeout time.Duration) error { return waitPidStop(ctr.state.PID, timeout) } // Wait for a given PID to stop func waitPidStop(pid int, timeout time.Duration) error { done := make(chan struct{}) chControl := make(chan struct{}) go func() { for { select { case <-chControl: return default: if err := unix.Kill(pid, 0); err != nil { if err == unix.ESRCH { close(done) return } logrus.Errorf("Error pinging PID %d with signal 0: %v", pid, err) } time.Sleep(100 * time.Millisecond) } } }() select { case <-done: return nil case <-time.After(timeout): close(chControl) return errors.Errorf("given PIDs did not die within timeout") } } func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) { logTag := ctr.LogTag() if logTag == "" { return "", nil } data, err := ctr.inspectLocked(false) if err != nil { return "", nil } tmpl, err := template.New("container").Parse(logTag) if err != nil { return "", errors.Wrapf(err, "template parsing error %s", logTag) } var b bytes.Buffer err = tmpl.Execute(&b, data) if err != nil { return "", err } return b.String(), nil } // createOCIContainer generates this container's main conmon instance and prepares it for starting func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { var stderrBuf bytes.Buffer 
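	// Two socketpairs are created below: the sync pipe, over which conmon
	// reports the container's PID (or an error) back to Podman, and the
	// start pipe, which Podman writes to once conmon has been placed in its
	// cgroup so that conmon may proceed to create the container.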
runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } parentSyncPipe, childSyncPipe, err := newPipe() if err != nil { return errors.Wrapf(err, "error creating socket pair") } defer errorhandling.CloseQuiet(parentSyncPipe) childStartPipe, parentStartPipe, err := newPipe() if err != nil { return errors.Wrapf(err, "error creating socket pair for start pipe") } defer errorhandling.CloseQuiet(parentStartPipe) var ociLog string if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { ociLog = filepath.Join(ctr.state.RunDir, "oci-log") } logTag, err := r.getLogTag(ctr) if err != nil { return err } if ctr.config.CgroupsMode == cgroupSplit { if err := utils.MoveUnderCgroupSubtree("supervisor"); err != nil { return err } } if ctr.config.SdNotifyMode == define.SdNotifyModeIgnore { if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { logrus.Warnf("Error unsetting NOTIFY_SOCKET %v", err) } } args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), filepath.Join(ctr.state.RunDir, "pidfile"), ctr.LogPath(), r.exitsDir, ociLog, ctr.LogDriver(), logTag) if ctr.config.Spec.Process.Terminal { args = append(args, "-t") } else if ctr.config.Stdin { args = append(args, "-i") } if !r.enableKeyring { args = append(args, "--no-new-keyring") } if ctr.config.ConmonPidFile != "" { args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile) } if r.noPivot { args = append(args, "--no-pivot") } if len(ctr.config.ExitCommand) > 0 { args = append(args, "--exit-command", ctr.config.ExitCommand[0]) for _, arg := range ctr.config.ExitCommand[1:] { args = append(args, []string{"--exit-command-arg", arg}...) } } if ctr.config.PreserveFDs > 0 { args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", ctr.config.PreserveFDs))...) } if restoreOptions != nil { args = append(args, "--restore", ctr.CheckpointPath()) if restoreOptions.TCPEstablished { args = append(args, "--runtime-opt", "--tcp-established") } } logrus.WithFields(logrus.Fields{ "args": args, }).Debugf("running conmon: %s", r.conmonPath) cmd := exec.Command(r.conmonPath, args...) cmd.Dir = ctr.bundlePath() cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, } // TODO this is probably a really bad idea for some uses // Make this configurable cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if ctr.config.Spec.Process.Terminal { cmd.Stderr = &stderrBuf } // 0, 1 and 2 are stdin, stdout and stderr conmonEnv, envFiles := r.configureConmonEnv(ctr, runtimeDir) var filesToClose []*os.File if ctr.config.PreserveFDs > 0 { for fd := 3; fd < int(3+ctr.config.PreserveFDs); fd++ { f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)) filesToClose = append(filesToClose, f) cmd.ExtraFiles = append(cmd.ExtraFiles, f) } } cmd.Env = r.conmonEnv // we don't want to step on users fds they asked to preserve // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", ctr.config.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", ctr.config.PreserveFDs+4)) cmd.Env = append(cmd.Env, conmonEnv...) cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe) cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...) if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { ports, err := bindPorts(ctr.config.PortMappings) if err != nil { return err } // Leak the port we bound in the conmon process. These fd's won't be used // by the container and conmon will keep the ports busy so that another // process cannot use them. 
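		// Entry i of cmd.ExtraFiles becomes file descriptor 3+i in conmon,
		// which is why _OCI_SYNCPIPE and _OCI_STARTPIPE above are offset by
		// PreserveFDs: the preserved fds occupy the first ExtraFiles slots.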
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) } if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() { if ctr.config.PostConfigureNetNS { havePortMapping := len(ctr.Config().PortMappings) > 0 if havePortMapping { ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe() if err != nil { return errors.Wrapf(err, "failed to create rootless port sync pipe") } } ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() if err != nil { return errors.Wrapf(err, "failed to create rootless network sync pipe") } } else { if ctr.rootlessSlirpSyncR != nil { defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR) } if ctr.rootlessSlirpSyncW != nil { defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW) } } // Leak one end in conmon, the other one will be leaked into slirp4netns cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW) if ctr.rootlessPortSyncW != nil { defer errorhandling.CloseQuiet(ctr.rootlessPortSyncW) // Leak one end in conmon, the other one will be leaked into rootlessport cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncW) } } err = startCommandGivenSelinux(cmd) // regardless of whether we errored or not, we no longer need the children pipes childSyncPipe.Close() childStartPipe.Close() if err != nil { return err } if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil { return err } /* Wait for initial setup and fork, and reap child */ err = cmd.Wait() if err != nil { return err } pid, err := readConmonPipeData(parentSyncPipe, ociLog) if err != nil { if err2 := r.DeleteContainer(ctr); err2 != nil { logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID()) } return err } ctr.state.PID = pid conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile) if err != nil { logrus.Warnf("error reading conmon pid file for container %s: %v", ctr.ID(), err) } else if conmonPID > 0 { // conmon not having a pid file is a valid state, so don't set it if we don't have it logrus.Infof("Got Conmon PID as %d", conmonPID) ctr.state.ConmonPID = conmonPID if ctr.config.SdNotifyMode != define.SdNotifyModeIgnore { if sent, err := daemon.SdNotify(false, fmt.Sprintf("MAINPID=%d", conmonPID)); err != nil { logrus.Errorf("Error notifying systemd of Conmon PID: %v", err) } else if sent { logrus.Debugf("Notify MAINPID sent successfully") } } } // These fds were passed down to the runtime. Close them // and not interfere for _, f := range filesToClose { errorhandling.CloseQuiet(f) } return nil } // prepareProcessExec returns the path of the process.json used in runc exec -p // caller is responsible to close the returned *os.File if needed. func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessionID string) (*os.File, error) { f, err := ioutil.TempFile(c.execBundlePath(sessionID), "exec-process-") if err != nil { return nil, err } pspec := c.config.Spec.Process pspec.SelinuxLabel = c.config.ProcessLabel pspec.Args = options.Cmd for _, cap := range options.CapAdd { pspec.Capabilities.Bounding = append(pspec.Capabilities.Bounding, cap) pspec.Capabilities.Effective = append(pspec.Capabilities.Effective, cap) pspec.Capabilities.Inheritable = append(pspec.Capabilities.Inheritable, cap) pspec.Capabilities.Permitted = append(pspec.Capabilities.Permitted, cap) pspec.Capabilities.Ambient = append(pspec.Capabilities.Ambient, cap) } // We need to default this to false else it will inherit terminal as true // from the container. 
pspec.Terminal = false if options.Terminal { pspec.Terminal = true } if len(env) > 0 { pspec.Env = append(pspec.Env, env...) } if options.Cwd != "" { pspec.Cwd = options.Cwd } var addGroups []string var sgids []uint32 // if the user is empty, we should inherit the user that the container is currently running with user := options.User if user == "" { user = c.config.User addGroups = c.config.Groups } overrides := c.getUserOverrides() execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, overrides) if err != nil { return nil, err } if len(addGroups) > 0 { sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides) if err != nil { return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID) } } // If user was set, look it up in the container to get a UID to use on // the host if user != "" || len(sgids) > 0 { if user != "" { for _, sgid := range execUser.Sgids { sgids = append(sgids, uint32(sgid)) } } processUser := spec.User{ UID: uint32(execUser.Uid), GID: uint32(execUser.Gid), AdditionalGids: sgids, } pspec.User = processUser } hasHomeSet := false for _, s := range pspec.Env { if strings.HasPrefix(s, "HOME=") { hasHomeSet = true break } } if !hasHomeSet { pspec.Env = append(pspec.Env, fmt.Sprintf("HOME=%s", execUser.Home)) } processJSON, err := json.Marshal(pspec) if err != nil { return nil, err } if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { return nil, err } return f, nil } // configureConmonEnv gets the environment values to add to conmon's exec struct // TODO this may want to be less hardcoded/more configurable in the future func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) ([]string, []*os.File) { env := make([]string, 0, 6) env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) home := homedir.Get() if home != "" { env = append(env, fmt.Sprintf("HOME=%s", home)) } extraFiles := make([]*os.File, 0) if ctr.config.SdNotifyMode == define.SdNotifyModeContainer { if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) } } if !r.sdNotify { if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok { env = append(env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1") fds := activation.Files(false) extraFiles = append(extraFiles, fds...) } } else { logrus.Debug("disabling SD notify") } return env, extraFiles } // sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logDriver, logTag string) []string { // set the conmon API version to be able to use the correct sync struct keys args := []string{ "--api-version", "1", "-c", ctr.ID(), "-u", cuuid, "-r", r.path, "-b", bundlePath, "-p", pidPath, "-n", ctr.Name(), "--exit-dir", exitDir, "--socket-dir-path", r.socketsDir, } if len(r.runtimeFlags) > 0 { rFlags := []string{} for _, arg := range r.runtimeFlags { rFlags = append(rFlags, "--runtime-arg", arg) } args = append(args, rFlags...) 
} if ctr.CgroupManager() == config.SystemdCgroupsManager && !ctr.config.NoCgroups && ctr.config.CgroupsMode != cgroupSplit { args = append(args, "-s") } var logDriverArg string switch logDriver { case define.JournaldLogging: logDriverArg = define.JournaldLogging case define.NoLogging: logDriverArg = define.NoLogging case define.JSONLogging: fallthrough default: //nolint-stylecheck // No case here should happen except JSONLogging, but keep this here in case the options are extended logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver()) fallthrough case "": // to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod // since the former case is obscure, and the latter case isn't an error, let's silently fallthrough fallthrough case define.KubernetesLogging: logDriverArg = fmt.Sprintf("%s:%s", define.KubernetesLogging, logPath) } args = append(args, "-l", logDriverArg) logLevel := logrus.GetLevel() args = append(args, "--log-level", logLevel.String()) if logLevel == logrus.DebugLevel { logrus.Debugf("%s messages will be logged to syslog", r.conmonPath) args = append(args, "--syslog") } size := r.logSizeMax if ctr.config.LogSize > 0 { size = ctr.config.LogSize } if size > 0 { args = append(args, "--log-size-max", fmt.Sprintf("%v", size)) } if ociLogPath != "" { args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLogPath)) } if logTag != "" { args = append(args, "--log-tag", logTag) } if ctr.config.NoCgroups { logrus.Debugf("Running with no CGroups") args = append(args, "--runtime-arg", "--cgroup-manager", "--runtime-arg", "disabled") } return args } // startCommandGivenSelinux starts a container ensuring to set the labels of // the process to make sure SELinux doesn't block conmon communication, if SELinux is enabled func startCommandGivenSelinux(cmd *exec.Cmd) error { if !selinux.GetEnabled() { return cmd.Start() } // Set the label of the conmon process to be level :s0 // This will allow the container processes to talk to fifo-files // passed into the container by conmon var ( plabel string con selinux.Context err error ) plabel, err = selinux.CurrentLabel() if err != nil { return errors.Wrapf(err, "failed to get current SELinux label") } con, err = selinux.NewContext(plabel) if err != nil { return errors.Wrapf(err, "failed to get new context from SELinux label") } runtime.LockOSThread() if con["level"] != "s0" && con["level"] != "" { con["level"] = "s0" if err = label.SetProcessLabel(con.Get()); err != nil { runtime.UnlockOSThread() return err } } err = cmd.Start() // Ignore error returned from SetProcessLabel("") call, // can't recover. if labelErr := label.SetProcessLabel(""); labelErr != nil { logrus.Errorf("unable to set process label: %q", err) } runtime.UnlockOSThread() return err } // moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup // it then signals for conmon to start by sending nonce data down the start fd func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File) error { mustCreateCgroup := true if ctr.config.NoCgroups { mustCreateCgroup = false } // If cgroup creation is disabled - just signal. switch ctr.config.CgroupsMode { case "disabled", "no-conmon", cgroupSplit: mustCreateCgroup = false } // $INVOCATION_ID is set by systemd when running as a service. 
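	// When Podman itself was started by systemd, conmon is left in the
	// service's own cgroup instead of being moved into a dedicated
	// libpod-conmon cgroup.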
if os.Getenv("INVOCATION_ID") != "" { mustCreateCgroup = false } if mustCreateCgroup { // Usually rootless users are not allowed to configure cgroupfs. // There are cases though, where it is allowed, e.g. if the cgroup // is manually configured and chowned). Avoid detecting all // such cases and simply use a lower log level. logLevel := logrus.WarnLevel if rootless.IsRootless() { logLevel = logrus.InfoLevel } // TODO: This should be a switch - we are not guaranteed that // there are only 2 valid cgroup managers cgroupParent := ctr.CgroupParent() if ctr.CgroupManager() == config.SystemdCgroupsManager { unitName := createUnitName("libpod-conmon", ctr.ID()) realCgroupParent := cgroupParent splitParent := strings.Split(cgroupParent, "/") if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 { realCgroupParent = splitParent[len(splitParent)-1] } logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName) if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil { logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err) } } else { cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon") control, err := cgroups.New(cgroupPath, &spec.LinuxResources{}) if err != nil { logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } else if err := control.AddPid(cmd.Process.Pid); err != nil { // we need to remove this defer and delete the cgroup once conmon exits // maybe need a conmon monitor? logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } } } /* We set the cgroup, now the child can start creating children */ if err := writeConmonPipeData(startFd); err != nil { return err } return nil } // newPipe creates a unix socket pair for communication. // Returns two files - first is parent, second is child. func newPipe() (*os.File, *os.File, error) { fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0) if err != nil { return nil, nil, err } return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil } // readConmonPidFile attempts to read conmon's pid from its pid file func readConmonPidFile(pidFile string) (int, error) { // Let's try reading the Conmon pid at the same time. 
if pidFile != "" { contents, err := ioutil.ReadFile(pidFile) if err != nil { return -1, err } // Convert it to an int conmonPID, err := strconv.Atoi(string(contents)) if err != nil { return -1, err } return conmonPID, nil } return 0, nil } // readConmonPipeData attempts to read a syncInfo struct from the pipe func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { // syncInfo is used to return data from monitor process to daemon type syncInfo struct { Data int `json:"data"` Message string `json:"message,omitempty"` } // Wait to get container pid from conmon type syncStruct struct { si *syncInfo err error } ch := make(chan syncStruct) go func() { var si *syncInfo rdr := bufio.NewReader(pipe) b, err := rdr.ReadBytes('\n') if err != nil { ch <- syncStruct{err: err} } if err := json.Unmarshal(b, &si); err != nil { ch <- syncStruct{err: err} return } ch <- syncStruct{si: si} }() data := -1 select { case ss := <-ch: if ss.err != nil { if ociLog != "" { ociLogData, err := ioutil.ReadFile(ociLog) if err == nil { var ociErr ociError if err := json.Unmarshal(ociLogData, &ociErr); err == nil { return -1, getOCIRuntimeError(ociErr.Msg) } } } return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)") } logrus.Debugf("Received: %d", ss.si.Data) if ss.si.Data < 0 { if ociLog != "" { ociLogData, err := ioutil.ReadFile(ociLog) if err == nil { var ociErr ociError if err := json.Unmarshal(ociLogData, &ociErr); err == nil { return ss.si.Data, getOCIRuntimeError(ociErr.Msg) } } } // If we failed to parse the JSON errors, then print the output as it is if ss.si.Message != "" { return ss.si.Data, getOCIRuntimeError(ss.si.Message) } return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed") } data = ss.si.Data case <-time.After(define.ContainerCreateTimeout): return -1, errors.Wrapf(define.ErrInternal, "container creation timeout") } return data, nil } // writeConmonPipeData writes nonce data to a pipe func writeConmonPipeData(pipe *os.File) error { someData := []byte{0} _, err := pipe.Write(someData) return err } // formatRuntimeOpts prepends opts passed to it with --runtime-opt for passing to conmon func formatRuntimeOpts(opts ...string) []string { args := make([]string, 0, len(opts)*2) for _, o := range opts { args = append(args, "--runtime-opt", o) } return args } // getConmonVersion returns a string representation of the conmon version. func (r *ConmonOCIRuntime) getConmonVersion() (string, error) { output, err := utils.ExecCmd(r.conmonPath, "--version") if err != nil { return "", err } return strings.TrimSuffix(strings.Replace(output, "\n", ", ", 1), "\n"), nil } // getOCIRuntimeVersion returns a string representation of the OCI runtime's // version. func (r *ConmonOCIRuntime) getOCIRuntimeVersion() (string, error) { output, err := utils.ExecCmd(r.path, "--version") if err != nil { return "", err } return strings.TrimSuffix(output, "\n"), nil } // Copy data from container to HTTP connection, for terminal attach. // Container is the container's attach socket connection, http is a buffer for // the HTTP connection. cid is the ID of the container the attach session is // running for (used solely for error messages). 
func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string) error { buf := make([]byte, bufferSize) for { numR, err := container.Read(buf) logrus.Debugf("Read fd(%d) %d/%d bytes for container %s", int(buf[0]), numR, len(buf), cid) if numR > 0 { switch buf[0] { case AttachPipeStdout: // Do nothing default: logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR) continue } numW, err2 := http.Write(buf[1:numR]) if err2 != nil { if err != nil { logrus.Errorf("Error reading container %s STDOUT: %v", cid, err) } return err2 } else if numW+1 != numR { return io.ErrShortWrite } // We need to force the buffer to write immediately, so // there isn't a delay on the terminal side. if err2 := http.Flush(); err2 != nil { if err != nil { logrus.Errorf("Error reading container %s STDOUT: %v", cid, err) } return err2 } } if err != nil { if err == io.EOF { return nil } return err } } } // Copy data from a container to an HTTP connection, for non-terminal attach. // Appends a header to multiplex input. func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string, stdin, stdout, stderr bool) error { buf := make([]byte, bufferSize) for { numR, err := container.Read(buf) if numR > 0 { var headerBuf []byte // Subtract 1 because we strip the first byte (used for // multiplexing by Conmon). headerLen := uint32(numR - 1) // Practically speaking, we could make this buf[0] - 1, // but we need to validate it anyways... switch buf[0] { case AttachPipeStdin: headerBuf = makeHTTPAttachHeader(0, headerLen) if !stdin { continue } case AttachPipeStdout: if !stdout { continue } headerBuf = makeHTTPAttachHeader(1, headerLen) case AttachPipeStderr: if !stderr { continue } headerBuf = makeHTTPAttachHeader(2, headerLen) default: logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR) continue } numH, err2 := http.Write(headerBuf) if err2 != nil { if err != nil { logrus.Errorf("Error reading container %s standard streams: %v", cid, err) } return err2 } // Hardcoding header length is pretty gross, but // fast. Should be safe, as this is a fixed part // of the protocol. if numH != 8 { if err != nil { logrus.Errorf("Error reading container %s standard streams: %v", cid, err) } return io.ErrShortWrite } numW, err2 := http.Write(buf[1:numR]) if err2 != nil { if err != nil { logrus.Errorf("Error reading container %s standard streams: %v", cid, err) } return err2 } else if numW+1 != numR { if err != nil { logrus.Errorf("Error reading container %s standard streams: %v", cid, err) } return io.ErrShortWrite } // We need to force the buffer to write immediately, so // there isn't a delay on the terminal side. if err2 := http.Flush(); err2 != nil { if err != nil { logrus.Errorf("Error reading container %s STDOUT: %v", cid, err) } return err2 } } if err != nil { if err == io.EOF { return nil } return err } } }