From 0b6bb6a3d3c3c15b9c6629a6949a616a30b0478a Mon Sep 17 00:00:00 2001 From: baude Date: Mon, 22 Apr 2019 16:01:31 -0500 Subject: enable podman-remote on windows build a podman-remote binary for windows that allows users to use the remote client on windows and interact with podman on linux system. Signed-off-by: baude --- .cirrus.yml | 1 + Makefile | 3 + cmd/podman/main.go | 20 +- cmd/podman/main_local.go | 27 +- cmd/podman/main_remote.go | 6 + libpod/healthcheck.go | 59 - libpod/healthcheck_linux.go | 67 + libpod/healthcheck_unsupported.go | 19 + libpod/oci.go | 447 ----- libpod/oci_linux.go | 447 +++++ libpod/oci_unsupported.go | 12 + pkg/adapter/runtime_remote_supported.go | 1 + pkg/adapter/sigproxy.go | 36 - pkg/adapter/sigproxy_linux.go | 36 + pkg/adapter/terminal.go | 81 - pkg/adapter/terminal_linux.go | 91 + pkg/spec/config_linux.go | 6 + pkg/spec/createconfig.go | 7 - pkg/util/utils.go | 72 - pkg/util/utils_supported.go | 60 + pkg/util/utils_windows.go | 12 + utils/utils.go | 33 - utils/utils_supported.go | 39 + utils/utils_windows.go | 9 + vendor.conf | 4 +- vendor/github.com/containers/buildah/buildah.go | 2 +- .../buildah/imagebuildah/chroot_symlink.go | 266 --- .../buildah/imagebuildah/chroot_symlink_linux.go | 266 +++ .../imagebuildah/chroot_symlink_unsupported.go | 13 + .../containers/buildah/pkg/parse/parse.go | 12 +- .../containers/buildah/pkg/parse/parse_unix.go | 20 + .../buildah/pkg/parse/parse_unsupported.go | 7 + vendor/github.com/containers/buildah/run.go | 2020 +------------------ vendor/github.com/containers/buildah/run_linux.go | 2022 +++++++++++++++++++- .../github.com/containers/buildah/run_unsupport.go | 11 - .../containers/buildah/run_unsupported.go | 20 + .../varlink/go/varlink/bridge_windows.go | 4 +- vendor/github.com/varlink/go/varlink/call.go | 19 +- vendor/github.com/varlink/go/varlink/service.go | 109 +- .../github.com/varlink/go/varlink/varlink_test.go | 26 +- 40 files changed, 3306 insertions(+), 3106 deletions(-) create mode 100644 libpod/healthcheck_linux.go create mode 100644 libpod/healthcheck_unsupported.go create mode 100644 pkg/adapter/runtime_remote_supported.go delete mode 100644 pkg/adapter/sigproxy.go create mode 100644 pkg/adapter/sigproxy_linux.go create mode 100644 pkg/adapter/terminal_linux.go create mode 100644 pkg/util/utils_supported.go create mode 100644 pkg/util/utils_windows.go create mode 100644 utils/utils_supported.go create mode 100644 utils/utils_windows.go delete mode 100644 vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go create mode 100644 vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go create mode 100644 vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse_unix.go create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go delete mode 100644 vendor/github.com/containers/buildah/run_unsupport.go create mode 100644 vendor/github.com/containers/buildah/run_unsupported.go diff --git a/.cirrus.yml b/.cirrus.yml index 0102dcb1a..6c9d199db 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -135,6 +135,7 @@ gating_task: - '/usr/local/bin/entrypoint.sh clean podman-remote' - '/usr/local/bin/entrypoint.sh clean podman BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp"' - '/usr/local/bin/entrypoint.sh podman-remote-darwin' + - '/usr/local/bin/entrypoint.sh podman-remote-windows' on_failure: master_script: 
'$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh' diff --git a/Makefile b/Makefile index 1990c2d11..9228ec711 100644 --- a/Makefile +++ b/Makefile @@ -120,6 +120,9 @@ podman-remote: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on podman-remote-darwin: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote OSX environment GOOS=darwin $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@ $(PROJECT)/cmd/podman +podman-remote-windows: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman for a remote windows environment + GOOS=windows $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@.exe $(PROJECT)/cmd/podman + local-cross: $(CROSS_BUILD_TARGETS) ## Cross local compilation bin/podman.cross.%: .gopathok diff --git a/cmd/podman/main.go b/cmd/podman/main.go index a0f1cf401..f501ee674 100644 --- a/cmd/podman/main.go +++ b/cmd/podman/main.go @@ -4,7 +4,6 @@ import ( "context" "io" "os" - "syscall" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/libpod" @@ -13,7 +12,6 @@ import ( "github.com/containers/libpod/version" "github.com/containers/storage/pkg/reexec" "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -118,25 +116,13 @@ func before(cmd *cobra.Command, args []string) error { } logrus.SetLevel(level) - rlimits := new(syscall.Rlimit) - rlimits.Cur = 1048576 - rlimits.Max = 1048576 - if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { - return errors.Wrapf(err, "error getting rlimits") - } - rlimits.Cur = rlimits.Max - if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { - return errors.Wrapf(err, "error setting new rlimits") - } + if err := setRLimits(); err != nil { + return err } - if rootless.IsRootless() { logrus.Info("running as rootless") } - - // Be sure we can create directories with 0755 mode. 
- syscall.Umask(0022) + setUMask() return profileOn(cmd) } diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go index 5afd51e28..7452965a2 100644 --- a/cmd/podman/main_local.go +++ b/cmd/podman/main_local.go @@ -4,16 +4,17 @@ package main import ( "context" - "github.com/containers/libpod/cmd/podman/cliconfig" - "github.com/containers/libpod/cmd/podman/libpodruntime" - "github.com/containers/libpod/pkg/rootless" "io/ioutil" "log/syslog" "os" "runtime/pprof" "strconv" "strings" + "syscall" + "github.com/containers/libpod/cmd/podman/cliconfig" + "github.com/containers/libpod/cmd/podman/libpodruntime" + "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/tracing" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" @@ -154,3 +155,23 @@ func setupRootless(cmd *cobra.Command, args []string) error { } return nil } +func setRLimits() error { + rlimits := new(syscall.Rlimit) + rlimits.Cur = 1048576 + rlimits.Max = 1048576 + if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { + return errors.Wrapf(err, "error getting rlimits") + } + rlimits.Cur = rlimits.Max + if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil { + return errors.Wrapf(err, "error setting new rlimits") + } + } + return nil +} + +func setUMask() { + // Be sure we can create directories with 0755 mode. + syscall.Umask(0022) +} diff --git a/cmd/podman/main_remote.go b/cmd/podman/main_remote.go index 2a7d184cd..a3335050a 100644 --- a/cmd/podman/main_remote.go +++ b/cmd/podman/main_remote.go @@ -41,3 +41,9 @@ func setupRootless(cmd *cobra.Command, args []string) error { } return nil } + +func setRLimits() error { + return nil +} + +func setUMask() {} diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 3a6609740..5c48cc8ee 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -3,16 +3,13 @@ package libpod import ( "bufio" "bytes" - "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "strings" "time" "github.com/containers/libpod/pkg/inspect" - "github.com/coreos/go-systemd/dbus" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -241,62 +238,6 @@ func (c *Container) GetHealthCheckLog() (inspect.HealthCheckResults, error) { return healthCheck, nil } -// createTimer systemd timers for healthchecks of a container -func (c *Container) createTimer() error { - if c.disableHealthCheckSystemd() { - return nil - } - podman, err := os.Executable() - if err != nil { - return errors.Wrapf(err, "failed to get path for podman for a health check timer") - } - - var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()} - - conn, err := dbus.NewSystemdConnection() - if err != nil { - return errors.Wrapf(err, "unable to get systemd connection to add healthchecks") - } - conn.Close() - logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd) - systemdRun := exec.Command("systemd-run", cmd...) 
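// A minimal sketch (illustration only, not part of this diff) of the
// build-constraint split this commit applies throughout the tree
// (main_local.go / main_remote.go here, healthcheck_linux.go /
// healthcheck_unsupported.go and the utils_* files further down): one file
// holds the real implementation, a second file with the opposite constraint
// holds a stub with the same signature, so shared code such as before() in
// main.go compiles for every GOOS. The file names and the "remoteclient" tag
// on these two sketch files are assumptions; the umask value mirrors
// main_local.go.

// file: umask_local.go
// +build !remoteclient

package main

import "syscall"

// setUMask makes sure directories are created with 0755 permissions.
func setUMask() {
	syscall.Umask(0022)
}

// file: umask_remote.go
// +build remoteclient

package main

// setUMask is a no-op for the remote client (including the windows build),
// where there is no umask to adjust.
func setUMask() {}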
- _, err = systemdRun.CombinedOutput() - if err != nil { - return err - } - return nil -} - -// startTimer starts a systemd timer for the healthchecks -func (c *Container) startTimer() error { - if c.disableHealthCheckSystemd() { - return nil - } - conn, err := dbus.NewSystemdConnection() - if err != nil { - return errors.Wrapf(err, "unable to get systemd connection to start healthchecks") - } - defer conn.Close() - _, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil) - return err -} - -// removeTimer removes the systemd timer and unit files -// for the container -func (c *Container) removeTimer() error { - if c.disableHealthCheckSystemd() { - return nil - } - conn, err := dbus.NewSystemdConnection() - if err != nil { - return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks") - } - defer conn.Close() - serviceFile := fmt.Sprintf("%s.timer", c.ID()) - _, err = conn.StopUnit(serviceFile, "fail", nil) - return err -} - // HealthCheckStatus returns the current state of a container with a healthcheck func (c *Container) HealthCheckStatus() (string, error) { if !c.HasHealthCheck() { diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go new file mode 100644 index 000000000..869605ea8 --- /dev/null +++ b/libpod/healthcheck_linux.go @@ -0,0 +1,67 @@ +package libpod + +import ( + "fmt" + "os" + "os/exec" + + "github.com/coreos/go-systemd/dbus" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// createTimer systemd timers for healthchecks of a container +func (c *Container) createTimer() error { + if c.disableHealthCheckSystemd() { + return nil + } + podman, err := os.Executable() + if err != nil { + return errors.Wrapf(err, "failed to get path for podman for a health check timer") + } + + var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()} + + conn, err := dbus.NewSystemdConnection() + if err != nil { + return errors.Wrapf(err, "unable to get systemd connection to add healthchecks") + } + conn.Close() + logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd) + systemdRun := exec.Command("systemd-run", cmd...) 
+ _, err = systemdRun.CombinedOutput() + if err != nil { + return err + } + return nil +} + +// startTimer starts a systemd timer for the healthchecks +func (c *Container) startTimer() error { + if c.disableHealthCheckSystemd() { + return nil + } + conn, err := dbus.NewSystemdConnection() + if err != nil { + return errors.Wrapf(err, "unable to get systemd connection to start healthchecks") + } + defer conn.Close() + _, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil) + return err +} + +// removeTimer removes the systemd timer and unit files +// for the container +func (c *Container) removeTimer() error { + if c.disableHealthCheckSystemd() { + return nil + } + conn, err := dbus.NewSystemdConnection() + if err != nil { + return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks") + } + defer conn.Close() + serviceFile := fmt.Sprintf("%s.timer", c.ID()) + _, err = conn.StopUnit(serviceFile, "fail", nil) + return err +} diff --git a/libpod/healthcheck_unsupported.go b/libpod/healthcheck_unsupported.go new file mode 100644 index 000000000..d01d1ccd4 --- /dev/null +++ b/libpod/healthcheck_unsupported.go @@ -0,0 +1,19 @@ +// +build !linux + +package libpod + +// createTimer systemd timers for healthchecks of a container +func (c *Container) createTimer() error { + return ErrNotImplemented +} + +// startTimer starts a systemd timer for the healthchecks +func (c *Container) startTimer() error { + return ErrNotImplemented +} + +// removeTimer removes the systemd timer and unit files +// for the container +func (c *Container) removeTimer() error { + return ErrNotImplemented +} diff --git a/libpod/oci.go b/libpod/oci.go index 189359753..3dfde4f24 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -1,7 +1,6 @@ package libpod import ( - "bufio" "bytes" "fmt" "io/ioutil" @@ -9,21 +8,15 @@ import ( "os" "os/exec" "path/filepath" - "runtime" "strings" - "syscall" "time" - "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" - "github.com/coreos/go-systemd/activation" "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" kwait "k8s.io/apimachinery/pkg/util/wait" // TODO import these functions into libpod and remove the import @@ -118,70 +111,6 @@ func createUnitName(prefix string, name string) string { return fmt.Sprintf("%s-%s.scope", prefix, name) } -// Wait for a container which has been sent a signal to stop -func waitContainerStop(ctr *Container, timeout time.Duration) error { - done := make(chan struct{}) - chControl := make(chan struct{}) - go func() { - for { - select { - case <-chControl: - return - default: - // Check if the process is still around - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - close(done) - return - } - time.Sleep(100 * time.Millisecond) - } - } - }() - select { - case <-done: - return nil - case <-time.After(timeout): - close(chControl) - logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout) - return errors.Errorf("container %s did not die within timeout", ctr.ID()) - } -} - -// Wait for a set of given PIDs to stop -func waitPidsStop(pids []int, timeout time.Duration) error { - done := make(chan struct{}) - chControl := make(chan struct{}) - go func() { - for { - select { - case <-chControl: - return - default: - allClosed := true - for _, pid := 
range pids { - if err := unix.Kill(pid, 0); err != unix.ESRCH { - allClosed = false - break - } - } - if allClosed { - close(done) - return - } - time.Sleep(100 * time.Millisecond) - } - } - }() - select { - case <-done: - return nil - case <-time.After(timeout): - close(chControl) - return errors.Errorf("given PIDs did not die within timeout") - } -} - func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) { var files []*os.File notifySCTP := false @@ -234,241 +163,6 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) { return files, nil } -func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { - var stderrBuf bytes.Buffer - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - - parentPipe, childPipe, err := newPipe() - if err != nil { - return errors.Wrapf(err, "error creating socket pair") - } - - childStartPipe, parentStartPipe, err := newPipe() - if err != nil { - return errors.Wrapf(err, "error creating socket pair for start pipe") - } - - defer parentPipe.Close() - defer parentStartPipe.Close() - - args := []string{} - if r.cgroupManager == SystemdCgroupsManager { - args = append(args, "-s") - } - args = append(args, "-c", ctr.ID()) - args = append(args, "-u", ctr.ID()) - args = append(args, "-r", r.path) - args = append(args, "-b", ctr.bundlePath()) - args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile")) - args = append(args, "-l", ctr.LogPath()) - args = append(args, "--exit-dir", r.exitsDir) - if ctr.config.ConmonPidFile != "" { - args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile) - } - if len(ctr.config.ExitCommand) > 0 { - args = append(args, "--exit-command", ctr.config.ExitCommand[0]) - for _, arg := range ctr.config.ExitCommand[1:] { - args = append(args, []string{"--exit-command-arg", arg}...) - } - } - args = append(args, "--socket-dir-path", r.socketsDir) - if ctr.config.Spec.Process.Terminal { - args = append(args, "-t") - } else if ctr.config.Stdin { - args = append(args, "-i") - } - if r.logSizeMax >= 0 { - args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax)) - } - if r.noPivot { - args = append(args, "--no-pivot") - } - - logLevel := logrus.GetLevel() - args = append(args, "--log-level", logLevel.String()) - - if logLevel == logrus.DebugLevel { - logrus.Debugf("%s messages will be logged to syslog", r.conmonPath) - args = append(args, "--syslog") - } - - if restoreOptions != nil { - args = append(args, "--restore", ctr.CheckpointPath()) - if restoreOptions.TCPEstablished { - args = append(args, "--restore-arg", "--tcp-established") - } - } - - logrus.WithFields(logrus.Fields{ - "args": args, - }).Debugf("running conmon: %s", r.conmonPath) - - cmd := exec.Command(r.conmonPath, args...) 
- cmd.Dir = ctr.bundlePath() - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - // TODO this is probably a really bad idea for some uses - // Make this configurable - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if ctr.config.Spec.Process.Terminal { - cmd.Stderr = &stderrBuf - } - - cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe) - // 0, 1 and 2 are stdin, stdout and stderr - cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3)) - cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4)) - cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) - cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) - cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) - cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME"))) - - if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { - ports, err := bindPorts(ctr.config.PortMappings) - if err != nil { - return err - } - - // Leak the port we bound in the conmon process. These fd's won't be used - // by the container and conmon will keep the ports busy so that another - // process cannot use them. - cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) - } - - if ctr.config.NetMode.IsSlirp4netns() { - ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() - if err != nil { - return errors.Wrapf(err, "failed to create rootless network sync pipe") - } - // Leak one end in conmon, the other one will be leaked into slirp4netns - cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW) - } - - if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { - cmd.Env = append(cmd.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) - } - if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok { - cmd.Env = append(cmd.Env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1") - fds := activation.Files(false) - cmd.ExtraFiles = append(cmd.ExtraFiles, fds...) - } - if selinux.GetEnabled() { - // Set the label of the conmon process to be level :s0 - // This will allow the container processes to talk to fifo-files - // passed into the container by conmon - var ( - plabel string - con selinux.Context - ) - plabel, err = selinux.CurrentLabel() - if err != nil { - childPipe.Close() - return errors.Wrapf(err, "Failed to get current SELinux label") - } - - con, err = selinux.NewContext(plabel) - if err != nil { - return errors.Wrapf(err, "Failed to get new context from SELinux label") - } - - runtime.LockOSThread() - if con["level"] != "s0" && con["level"] != "" { - con["level"] = "s0" - if err = label.SetProcessLabel(con.Get()); err != nil { - runtime.UnlockOSThread() - return err - } - } - err = cmd.Start() - // Ignore error returned from SetProcessLabel("") call, - // can't recover. 
- label.SetProcessLabel("") - runtime.UnlockOSThread() - } else { - err = cmd.Start() - } - if err != nil { - childPipe.Close() - return err - } - defer cmd.Wait() - - // We don't need childPipe on the parent side - childPipe.Close() - childStartPipe.Close() - - // Move conmon to specified cgroup - if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil { - return err - } - - /* We set the cgroup, now the child can start creating children */ - someData := []byte{0} - _, err = parentStartPipe.Write(someData) - if err != nil { - return err - } - - /* Wait for initial setup and fork, and reap child */ - err = cmd.Wait() - if err != nil { - return err - } - - defer func() { - if err != nil { - if err2 := r.deleteContainer(ctr); err2 != nil { - logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID()) - } - } - }() - - // Wait to get container pid from conmon - type syncStruct struct { - si *syncInfo - err error - } - ch := make(chan syncStruct) - go func() { - var si *syncInfo - rdr := bufio.NewReader(parentPipe) - b, err := rdr.ReadBytes('\n') - if err != nil { - ch <- syncStruct{err: err} - } - if err := json.Unmarshal(b, &si); err != nil { - ch <- syncStruct{err: err} - return - } - ch <- syncStruct{si: si} - }() - - select { - case ss := <-ch: - if ss.err != nil { - return errors.Wrapf(ss.err, "error reading container (probably exited) json message") - } - logrus.Debugf("Received container pid: %d", ss.si.Pid) - if ss.si.Pid == -1 { - if ss.si.Message != "" { - return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message) - } - return errors.Wrapf(ErrInternal, "container create failed") - } - ctr.state.PID = ss.si.Pid - case <-time.After(ContainerCreateTimeout): - return errors.Wrapf(ErrInternal, "container creation timeout") - } - return nil -} - // updateContainerStatus retrieves the current status of the container from the // runtime. It updates the container's state but does not save it. // If useRunc is false, we will not directly hit runc to see the container's @@ -631,82 +325,6 @@ func (r *OCIRuntime) killContainer(ctr *Container, signal uint) error { return nil } -// stopContainer stops a container, first using its given stop signal (or -// SIGTERM if no signal was specified), then using SIGKILL -// Timeout is given in seconds. If timeout is 0, the container will be -// immediately kill with SIGKILL -// Does not set finished time for container, assumes you will run updateStatus -// after to pull the exit code -func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { - logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID) - - // Ping the container to see if it's alive - // If it's not, it's already stopped, return - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - stopSignal := ctr.config.StopSignal - if stopSignal == 0 { - stopSignal = uint(syscall.SIGTERM) - } - - if timeout > 0 { - if err := r.killContainer(ctr, stopSignal); err != nil { - // Is the container gone? 
- // If so, it probably died between the first check and - // our sending the signal - // The container is stopped, so exit cleanly - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - return err - } - - if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { - logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID()) - } else { - // No error, the container is dead - return nil - } - } - - var args []string - if rootless.IsRootless() { - // we don't use --all for rootless containers as the OCI runtime might use - // the cgroups to determine the PIDs, but for rootless containers there is - // not any. - args = []string{"kill", ctr.ID(), "KILL"} - } else { - args = []string{"kill", "--all", ctr.ID(), "KILL"} - } - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil { - // Again, check if the container is gone. If it is, exit cleanly. - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID()) - } - - // Give runtime a few seconds to make it happen - if err := waitContainerStop(ctr, killContainerTimeout); err != nil { - return err - } - - return nil -} - // deleteContainer deletes a container from the OCI runtime func (r *OCIRuntime) deleteContainer(ctr *Container) error { runtimeDir, err := util.GetRootlessRuntimeDir() @@ -834,71 +452,6 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty return execCmd, nil } -// execStopContainer stops all active exec sessions in a container -// It will also stop all other processes in the container. It is only intended -// to be used to assist in cleanup when removing a container. -// SIGTERM is used by default to stop processes. If SIGTERM fails, SIGKILL will be used. -func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { - // Do we have active exec sessions? 
- if len(ctr.state.ExecSessions) == 0 { - return nil - } - - // Get a list of active exec sessions - execSessions := []int{} - for _, session := range ctr.state.ExecSessions { - pid := session.PID - // Ping the PID with signal 0 to see if it still exists - if err := unix.Kill(pid, 0); err == unix.ESRCH { - continue - } - - execSessions = append(execSessions, pid) - } - - // All the sessions may be dead - // If they are, just return - if len(execSessions) == 0 { - return nil - } - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - - // If timeout is 0, just use SIGKILL - if timeout > 0 { - // Stop using SIGTERM by default - // Use SIGSTOP after a timeout - logrus.Debugf("Killing all processes in container %s with SIGTERM", ctr.ID()) - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "TERM"); err != nil { - return errors.Wrapf(err, "error sending SIGTERM to container %s processes", ctr.ID()) - } - - // Wait for all processes to stop - if err := waitPidsStop(execSessions, time.Duration(timeout)*time.Second); err != nil { - logrus.Warnf("Timed out stopping container %s exec sessions", ctr.ID()) - } else { - // No error, all exec sessions are dead - return nil - } - } - - // Send SIGKILL - logrus.Debugf("Killing all processes in container %s with SIGKILL", ctr.ID()) - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "KILL"); err != nil { - return errors.Wrapf(err, "error sending SIGKILL to container %s processes", ctr.ID()) - } - - // Give the processes a few seconds to go down - if err := waitPidsStop(execSessions, killContainerTimeout); err != nil { - return errors.Wrapf(err, "failed to kill container %s exec sessions", ctr.ID()) - } - - return nil -} - // checkpointContainer checkpoints the given container func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error { label.SetSocketLabel(ctr.ProcessLabel()) diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go index 1f5411c1f..1c1e4a203 100644 --- a/libpod/oci_linux.go +++ b/libpod/oci_linux.go @@ -3,6 +3,8 @@ package libpod import ( + "bufio" + "bytes" "fmt" "os" "os/exec" @@ -10,12 +12,17 @@ import ( "runtime" "strings" "syscall" + "time" "github.com/containerd/cgroups" "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" "github.com/containers/libpod/utils" pmount "github.com/containers/storage/pkg/mount" + "github.com/coreos/go-systemd/activation" spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -179,3 +186,443 @@ func (r *OCIRuntime) conmonPackage() string { } return dpkgVersion(r.conmonPath) } + +func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { + var stderrBuf bytes.Buffer + + runtimeDir, err := util.GetRootlessRuntimeDir() + if err != nil { + return err + } + + parentPipe, childPipe, err := newPipe() + if err != nil { + return errors.Wrapf(err, "error creating socket pair") + } + + childStartPipe, parentStartPipe, err := newPipe() + if err != nil { + return errors.Wrapf(err, "error creating socket pair for start pipe") + } + + defer parentPipe.Close() + defer 
parentStartPipe.Close() + + args := []string{} + if r.cgroupManager == SystemdCgroupsManager { + args = append(args, "-s") + } + args = append(args, "-c", ctr.ID()) + args = append(args, "-u", ctr.ID()) + args = append(args, "-r", r.path) + args = append(args, "-b", ctr.bundlePath()) + args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile")) + args = append(args, "-l", ctr.LogPath()) + args = append(args, "--exit-dir", r.exitsDir) + if ctr.config.ConmonPidFile != "" { + args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile) + } + if len(ctr.config.ExitCommand) > 0 { + args = append(args, "--exit-command", ctr.config.ExitCommand[0]) + for _, arg := range ctr.config.ExitCommand[1:] { + args = append(args, []string{"--exit-command-arg", arg}...) + } + } + args = append(args, "--socket-dir-path", r.socketsDir) + if ctr.config.Spec.Process.Terminal { + args = append(args, "-t") + } else if ctr.config.Stdin { + args = append(args, "-i") + } + if r.logSizeMax >= 0 { + args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax)) + } + if r.noPivot { + args = append(args, "--no-pivot") + } + + logLevel := logrus.GetLevel() + args = append(args, "--log-level", logLevel.String()) + + if logLevel == logrus.DebugLevel { + logrus.Debugf("%s messages will be logged to syslog", r.conmonPath) + args = append(args, "--syslog") + } + + if restoreOptions != nil { + args = append(args, "--restore", ctr.CheckpointPath()) + if restoreOptions.TCPEstablished { + args = append(args, "--restore-arg", "--tcp-established") + } + } + + logrus.WithFields(logrus.Fields{ + "args": args, + }).Debugf("running conmon: %s", r.conmonPath) + + cmd := exec.Command(r.conmonPath, args...) + cmd.Dir = ctr.bundlePath() + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + // TODO this is probably a really bad idea for some uses + // Make this configurable + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if ctr.config.Spec.Process.Terminal { + cmd.Stderr = &stderrBuf + } + + cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe) + // 0, 1 and 2 are stdin, stdout and stderr + cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3)) + cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4)) + cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) + cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) + cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) + cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME"))) + + if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { + ports, err := bindPorts(ctr.config.PortMappings) + if err != nil { + return err + } + + // Leak the port we bound in the conmon process. These fd's won't be used + // by the container and conmon will keep the ports busy so that another + // process cannot use them. + cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) 
+ } + + if ctr.config.NetMode.IsSlirp4netns() { + ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "failed to create rootless network sync pipe") + } + // Leak one end in conmon, the other one will be leaked into slirp4netns + cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW) + } + + if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { + cmd.Env = append(cmd.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) + } + if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok { + cmd.Env = append(cmd.Env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1") + fds := activation.Files(false) + cmd.ExtraFiles = append(cmd.ExtraFiles, fds...) + } + if selinux.GetEnabled() { + // Set the label of the conmon process to be level :s0 + // This will allow the container processes to talk to fifo-files + // passed into the container by conmon + var ( + plabel string + con selinux.Context + ) + plabel, err = selinux.CurrentLabel() + if err != nil { + childPipe.Close() + return errors.Wrapf(err, "Failed to get current SELinux label") + } + + con, err = selinux.NewContext(plabel) + if err != nil { + return errors.Wrapf(err, "Failed to get new context from SELinux label") + } + + runtime.LockOSThread() + if con["level"] != "s0" && con["level"] != "" { + con["level"] = "s0" + if err = label.SetProcessLabel(con.Get()); err != nil { + runtime.UnlockOSThread() + return err + } + } + err = cmd.Start() + // Ignore error returned from SetProcessLabel("") call, + // can't recover. + label.SetProcessLabel("") + runtime.UnlockOSThread() + } else { + err = cmd.Start() + } + if err != nil { + childPipe.Close() + return err + } + defer cmd.Wait() + + // We don't need childPipe on the parent side + childPipe.Close() + childStartPipe.Close() + + // Move conmon to specified cgroup + if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil { + return err + } + + /* We set the cgroup, now the child can start creating children */ + someData := []byte{0} + _, err = parentStartPipe.Write(someData) + if err != nil { + return err + } + + /* Wait for initial setup and fork, and reap child */ + err = cmd.Wait() + if err != nil { + return err + } + + defer func() { + if err != nil { + if err2 := r.deleteContainer(ctr); err2 != nil { + logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID()) + } + } + }() + + // Wait to get container pid from conmon + type syncStruct struct { + si *syncInfo + err error + } + ch := make(chan syncStruct) + go func() { + var si *syncInfo + rdr := bufio.NewReader(parentPipe) + b, err := rdr.ReadBytes('\n') + if err != nil { + ch <- syncStruct{err: err} + } + if err := json.Unmarshal(b, &si); err != nil { + ch <- syncStruct{err: err} + return + } + ch <- syncStruct{si: si} + }() + + select { + case ss := <-ch: + if ss.err != nil { + return errors.Wrapf(ss.err, "error reading container (probably exited) json message") + } + logrus.Debugf("Received container pid: %d", ss.si.Pid) + if ss.si.Pid == -1 { + if ss.si.Message != "" { + return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message) + } + return errors.Wrapf(ErrInternal, "container create failed") + } + ctr.state.PID = ss.si.Pid + case <-time.After(ContainerCreateTimeout): + return errors.Wrapf(ErrInternal, "container creation timeout") + } + return nil +} + +// Wait for a container which has been sent a signal to stop +func waitContainerStop(ctr *Container, timeout time.Duration) error { + done := make(chan struct{}) + 
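// A minimal sketch (illustration only, not part of this diff) of the pipe
// handshake createOCIContainer sets up above: conmon inherits the write end
// of a pipe through cmd.ExtraFiles, learns its fd number from _OCI_SYNCPIPE,
// and reports the container pid back as a single JSON line that the parent
// decodes into syncInfo. The helper binary name below is an assumption, and
// os.Pipe stands in for the socketpair-based newPipe used by the real code.
package conmonpipe

import (
	"bufio"
	"encoding/json"
	"os"
	"os/exec"
)

type syncInfo struct {
	Pid     int    `json:"pid"`
	Message string `json:"message,omitempty"`
}

// startAndWaitForPid launches a hypothetical helper, hands it the write end of
// a pipe as fd 3, and reads back one JSON line carrying the container pid --
// the same handshake createOCIContainer performs with conmon (which it
// additionally wraps in a select with ContainerCreateTimeout).
func startAndWaitForPid() (int, error) {
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return 0, err
	}
	defer readEnd.Close()

	cmd := exec.Command("some-runtime-helper")        // hypothetical child binary
	cmd.ExtraFiles = []*os.File{writeEnd}             // ExtraFiles[0] becomes fd 3 in the child
	cmd.Env = append(os.Environ(), "_OCI_SYNCPIPE=3") // tells the child which fd to write to
	if err := cmd.Start(); err != nil {
		return 0, err
	}
	writeEnd.Close() // the parent only reads
	defer cmd.Wait()

	line, err := bufio.NewReader(readEnd).ReadBytes('\n')
	if err != nil {
		return 0, err
	}
	var si syncInfo
	if err := json.Unmarshal(line, &si); err != nil {
		return 0, err
	}
	return si.Pid, nil
}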
chControl := make(chan struct{}) + go func() { + for { + select { + case <-chControl: + return + default: + // Check if the process is still around + err := unix.Kill(ctr.state.PID, 0) + if err == unix.ESRCH { + close(done) + return + } + time.Sleep(100 * time.Millisecond) + } + } + }() + select { + case <-done: + return nil + case <-time.After(timeout): + close(chControl) + logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout) + return errors.Errorf("container %s did not die within timeout", ctr.ID()) + } +} + +// Wait for a set of given PIDs to stop +func waitPidsStop(pids []int, timeout time.Duration) error { + done := make(chan struct{}) + chControl := make(chan struct{}) + go func() { + for { + select { + case <-chControl: + return + default: + allClosed := true + for _, pid := range pids { + if err := unix.Kill(pid, 0); err != unix.ESRCH { + allClosed = false + break + } + } + if allClosed { + close(done) + return + } + time.Sleep(100 * time.Millisecond) + } + } + }() + select { + case <-done: + return nil + case <-time.After(timeout): + close(chControl) + return errors.Errorf("given PIDs did not die within timeout") + } +} + +// stopContainer stops a container, first using its given stop signal (or +// SIGTERM if no signal was specified), then using SIGKILL +// Timeout is given in seconds. If timeout is 0, the container will be +// immediately kill with SIGKILL +// Does not set finished time for container, assumes you will run updateStatus +// after to pull the exit code +func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { + logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID) + + // Ping the container to see if it's alive + // If it's not, it's already stopped, return + err := unix.Kill(ctr.state.PID, 0) + if err == unix.ESRCH { + return nil + } + + stopSignal := ctr.config.StopSignal + if stopSignal == 0 { + stopSignal = uint(syscall.SIGTERM) + } + + if timeout > 0 { + if err := r.killContainer(ctr, stopSignal); err != nil { + // Is the container gone? + // If so, it probably died between the first check and + // our sending the signal + // The container is stopped, so exit cleanly + err := unix.Kill(ctr.state.PID, 0) + if err == unix.ESRCH { + return nil + } + + return err + } + + if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { + logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID()) + } else { + // No error, the container is dead + return nil + } + } + + var args []string + if rootless.IsRootless() { + // we don't use --all for rootless containers as the OCI runtime might use + // the cgroups to determine the PIDs, but for rootless containers there is + // not any. + args = []string{"kill", ctr.ID(), "KILL"} + } else { + args = []string{"kill", "--all", ctr.ID(), "KILL"} + } + + runtimeDir, err := util.GetRootlessRuntimeDir() + if err != nil { + return err + } + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil { + // Again, check if the container is gone. If it is, exit cleanly. 
+ err := unix.Kill(ctr.state.PID, 0) + if err == unix.ESRCH { + return nil + } + + return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID()) + } + + // Give runtime a few seconds to make it happen + if err := waitContainerStop(ctr, killContainerTimeout); err != nil { + return err + } + + return nil +} + +// execStopContainer stops all active exec sessions in a container +// It will also stop all other processes in the container. It is only intended +// to be used to assist in cleanup when removing a container. +// SIGTERM is used by default to stop processes. If SIGTERM fails, SIGKILL will be used. +func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { + // Do we have active exec sessions? + if len(ctr.state.ExecSessions) == 0 { + return nil + } + + // Get a list of active exec sessions + execSessions := []int{} + for _, session := range ctr.state.ExecSessions { + pid := session.PID + // Ping the PID with signal 0 to see if it still exists + if err := unix.Kill(pid, 0); err == unix.ESRCH { + continue + } + + execSessions = append(execSessions, pid) + } + + // All the sessions may be dead + // If they are, just return + if len(execSessions) == 0 { + return nil + } + runtimeDir, err := util.GetRootlessRuntimeDir() + if err != nil { + return err + } + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + + // If timeout is 0, just use SIGKILL + if timeout > 0 { + // Stop using SIGTERM by default + // Use SIGSTOP after a timeout + logrus.Debugf("Killing all processes in container %s with SIGTERM", ctr.ID()) + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "TERM"); err != nil { + return errors.Wrapf(err, "error sending SIGTERM to container %s processes", ctr.ID()) + } + + // Wait for all processes to stop + if err := waitPidsStop(execSessions, time.Duration(timeout)*time.Second); err != nil { + logrus.Warnf("Timed out stopping container %s exec sessions", ctr.ID()) + } else { + // No error, all exec sessions are dead + return nil + } + } + + // Send SIGKILL + logrus.Debugf("Killing all processes in container %s with SIGKILL", ctr.ID()) + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "KILL"); err != nil { + return errors.Wrapf(err, "error sending SIGKILL to container %s processes", ctr.ID()) + } + + // Give the processes a few seconds to go down + if err := waitPidsStop(execSessions, killContainerTimeout); err != nil { + return errors.Wrapf(err, "failed to kill container %s exec sessions", ctr.ID()) + } + + return nil +} diff --git a/libpod/oci_unsupported.go b/libpod/oci_unsupported.go index 8c084d1e2..12183faf3 100644 --- a/libpod/oci_unsupported.go +++ b/libpod/oci_unsupported.go @@ -26,3 +26,15 @@ func (r *OCIRuntime) pathPackage() string { func (r *OCIRuntime) conmonPackage() string { return "" } + +func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { + return ErrOSNotSupported +} + +func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { + return ErrOSNotSupported +} + +func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { + return ErrOSNotSupported +} diff --git a/pkg/adapter/runtime_remote_supported.go b/pkg/adapter/runtime_remote_supported.go new file mode 100644 index 000000000..b8e8da308 --- /dev/null +++ b/pkg/adapter/runtime_remote_supported.go @@ -0,0 +1 @@ +package adapter diff --git 
a/pkg/adapter/sigproxy.go b/pkg/adapter/sigproxy.go deleted file mode 100644 index af968cb89..000000000 --- a/pkg/adapter/sigproxy.go +++ /dev/null @@ -1,36 +0,0 @@ -package adapter - -import ( - "os" - "syscall" - - "github.com/containers/libpod/libpod" - "github.com/docker/docker/pkg/signal" - "github.com/sirupsen/logrus" -) - -// ProxySignals ... -func ProxySignals(ctr *libpod.Container) { - sigBuffer := make(chan os.Signal, 128) - signal.CatchAll(sigBuffer) - - logrus.Debugf("Enabling signal proxying") - - go func() { - for s := range sigBuffer { - // Ignore SIGCHLD and SIGPIPE - these are mostly likely - // intended for the podman command itself. - if s == signal.SIGCHLD || s == signal.SIGPIPE { - continue - } - - if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil { - logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err) - signal.StopCatch(sigBuffer) - syscall.Kill(syscall.Getpid(), s.(syscall.Signal)) - } - } - }() - - return -} diff --git a/pkg/adapter/sigproxy_linux.go b/pkg/adapter/sigproxy_linux.go new file mode 100644 index 000000000..af968cb89 --- /dev/null +++ b/pkg/adapter/sigproxy_linux.go @@ -0,0 +1,36 @@ +package adapter + +import ( + "os" + "syscall" + + "github.com/containers/libpod/libpod" + "github.com/docker/docker/pkg/signal" + "github.com/sirupsen/logrus" +) + +// ProxySignals ... +func ProxySignals(ctr *libpod.Container) { + sigBuffer := make(chan os.Signal, 128) + signal.CatchAll(sigBuffer) + + logrus.Debugf("Enabling signal proxying") + + go func() { + for s := range sigBuffer { + // Ignore SIGCHLD and SIGPIPE - these are mostly likely + // intended for the podman command itself. + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + + if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil { + logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err) + signal.StopCatch(sigBuffer) + syscall.Kill(syscall.Getpid(), s.(syscall.Signal)) + } + } + }() + + return +} diff --git a/pkg/adapter/terminal.go b/pkg/adapter/terminal.go index 0b608decf..373c78322 100644 --- a/pkg/adapter/terminal.go +++ b/pkg/adapter/terminal.go @@ -2,16 +2,12 @@ package adapter import ( "context" - "fmt" "os" gosignal "os/signal" - "github.com/containers/libpod/libpod" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" "k8s.io/client-go/tools/remotecommand" ) @@ -19,83 +15,6 @@ import ( type RawTtyFormatter struct { } -// StartAttachCtr starts and (if required) attaches to a container -func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { - resize := make(chan remotecommand.TerminalSize) - - haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) - - // Check if we are attached to a terminal. 
If we are, generate resize - // events, and set the terminal to raw mode - if haveTerminal && ctr.Spec().Process.Terminal { - logrus.Debugf("Handling terminal attach") - - subCtx, cancel := context.WithCancel(ctx) - defer cancel() - - resizeTty(subCtx, resize) - - oldTermState, err := term.SaveState(os.Stdin.Fd()) - if err != nil { - return errors.Wrapf(err, "unable to save terminal state") - } - - logrus.SetFormatter(&RawTtyFormatter{}) - term.SetRawTerminal(os.Stdin.Fd()) - - defer restoreTerminal(oldTermState) - } - - streams := new(libpod.AttachStreams) - streams.OutputStream = stdout - streams.ErrorStream = stderr - streams.InputStream = stdin - streams.AttachOutput = true - streams.AttachError = true - streams.AttachInput = true - - if stdout == nil { - logrus.Debugf("Not attaching to stdout") - streams.AttachOutput = false - } - if stderr == nil { - logrus.Debugf("Not attaching to stderr") - streams.AttachError = false - } - if stdin == nil { - logrus.Debugf("Not attaching to stdin") - streams.AttachInput = false - } - - if !startContainer { - if sigProxy { - ProxySignals(ctr) - } - - return ctr.Attach(streams, detachKeys, resize) - } - - attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive) - if err != nil { - return err - } - - if sigProxy { - ProxySignals(ctr) - } - - if stdout == nil && stderr == nil { - fmt.Printf("%s\n", ctr.ID()) - } - - err = <-attachChan - if err != nil { - return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) - } - - return nil -} - // getResize returns a TerminalSize command matching stdin's current // size on success, and nil on errors. func getResize() *remotecommand.TerminalSize { diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go new file mode 100644 index 000000000..3c4c3bd38 --- /dev/null +++ b/pkg/adapter/terminal_linux.go @@ -0,0 +1,91 @@ +package adapter + +import ( + "context" + "fmt" + "os" + + "github.com/containers/libpod/libpod" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" + "k8s.io/client-go/tools/remotecommand" +) + +// StartAttachCtr starts and (if required) attaches to a container +func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { + resize := make(chan remotecommand.TerminalSize) + + haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) + + // Check if we are attached to a terminal. 
If we are, generate resize + // events, and set the terminal to raw mode + if haveTerminal && ctr.Spec().Process.Terminal { + logrus.Debugf("Handling terminal attach") + + subCtx, cancel := context.WithCancel(ctx) + defer cancel() + + resizeTty(subCtx, resize) + + oldTermState, err := term.SaveState(os.Stdin.Fd()) + if err != nil { + return errors.Wrapf(err, "unable to save terminal state") + } + + logrus.SetFormatter(&RawTtyFormatter{}) + term.SetRawTerminal(os.Stdin.Fd()) + + defer restoreTerminal(oldTermState) + } + + streams := new(libpod.AttachStreams) + streams.OutputStream = stdout + streams.ErrorStream = stderr + streams.InputStream = stdin + streams.AttachOutput = true + streams.AttachError = true + streams.AttachInput = true + + if stdout == nil { + logrus.Debugf("Not attaching to stdout") + streams.AttachOutput = false + } + if stderr == nil { + logrus.Debugf("Not attaching to stderr") + streams.AttachError = false + } + if stdin == nil { + logrus.Debugf("Not attaching to stdin") + streams.AttachInput = false + } + + if !startContainer { + if sigProxy { + ProxySignals(ctr) + } + + return ctr.Attach(streams, detachKeys, resize) + } + + attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive) + if err != nil { + return err + } + + if sigProxy { + ProxySignals(ctr) + } + + if stdout == nil && stderr == nil { + fmt.Printf("%s\n", ctr.ID()) + } + + err = <-attachChan + if err != nil { + return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) + } + + return nil +} diff --git a/pkg/spec/config_linux.go b/pkg/spec/config_linux.go index a1873086e..eb2acf984 100644 --- a/pkg/spec/config_linux.go +++ b/pkg/spec/config_linux.go @@ -244,3 +244,9 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott } return ltds, nil } + +func getStatFromPath(path string) (unix.Stat_t, error) { + s := unix.Stat_t{} + err := unix.Stat(path, &s) + return s, err +} diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go index 064dedd45..d1b722807 100644 --- a/pkg/spec/createconfig.go +++ b/pkg/spec/createconfig.go @@ -19,7 +19,6 @@ import ( "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // Type constants @@ -642,9 +641,3 @@ func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) { func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error { return c.addPrivilegedDevices(g) } - -func getStatFromPath(path string) (unix.Stat_t, error) { - s := unix.Stat_t{} - err := unix.Stat(path, &s) - return s, err -} diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 136f8fadd..14b0c2b55 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -6,12 +6,10 @@ import ( "path/filepath" "strings" "sync" - "syscall" "time" "github.com/BurntSushi/toml" "github.com/containers/image/types" - "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/image-spec/specs-go/v1" @@ -187,76 +185,6 @@ var ( rootlessRuntimeDir string ) -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - uid := fmt.Sprintf("%d", rootless.GetRootlessUID()) - if runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - os.MkdirAll(tmpDir, 0700) - st, err := 
os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid)) - os.MkdirAll(tmpDir, 0700) - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - home := os.Getenv("HOME") - if home == "" { - rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty") - return - } - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home) - return - } - runtimeDir = filepath.Join(resolvedHome, "rundir") - } - rootlessRuntimeDir = runtimeDir - }) - - if rootlessRuntimeDirError != nil { - return "", rootlessRuntimeDirError - } - return rootlessRuntimeDir, nil -} - -// GetRootlessDirInfo returns the parent path of where the storage for containers and -// volumes will be in rootless mode -func GetRootlessDirInfo() (string, string, error) { - rootlessRuntime, err := GetRootlessRuntimeDir() - if err != nil { - return "", "", err - } - - dataDir := os.Getenv("XDG_DATA_HOME") - if dataDir == "" { - home := os.Getenv("HOME") - if home == "" { - return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty") - } - // runc doesn't like symlinks in the rootfs path, and at least - // on CoreOS /home is a symlink to /var/home, so resolve any symlink. - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - return "", "", errors.Wrapf(err, "cannot resolve %s", home) - } - dataDir = filepath.Join(resolvedHome, ".local", "share") - } - return dataDir, rootlessRuntime, nil -} - type tomlOptionsConfig struct { MountProgram string `toml:"mount_program"` } diff --git a/pkg/util/utils_supported.go b/pkg/util/utils_supported.go new file mode 100644 index 000000000..af5e67fc1 --- /dev/null +++ b/pkg/util/utils_supported.go @@ -0,0 +1,60 @@ +// +build linux darwin + +package util + +// TODO once rootless function is consolidated under libpod, we +// should work to take darwin from this + +import ( + "fmt" + "github.com/containers/libpod/pkg/rootless" + "github.com/pkg/errors" + "os" + "path/filepath" + "syscall" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + uid := fmt.Sprintf("%d", rootless.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + os.MkdirAll(tmpDir, 0700) + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid)) + os.MkdirAll(tmpDir, 0700) + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + home := os.Getenv("HOME") + if home == "" { + rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty") + return + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home) + return + } + runtimeDir = 
filepath.Join(resolvedHome, "rundir") + } + rootlessRuntimeDir = runtimeDir + }) + + if rootlessRuntimeDirError != nil { + return "", rootlessRuntimeDirError + } + return rootlessRuntimeDir, nil +} diff --git a/pkg/util/utils_windows.go b/pkg/util/utils_windows.go new file mode 100644 index 000000000..1e9ccea90 --- /dev/null +++ b/pkg/util/utils_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package util + +import ( + "github.com/pkg/errors" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir() (string, error) { + return "", errors.New("this function is not implemented for windows") +} diff --git a/utils/utils.go b/utils/utils.go index c195daa5d..86adfb967 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -9,8 +9,6 @@ import ( "strings" "github.com/containers/storage/pkg/archive" - systemdDbus "github.com/coreos/go-systemd/dbus" - "github.com/godbus/dbus" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -55,37 +53,6 @@ func StatusToExitCode(status int) int { return ((status) & 0xff00) >> 8 } -// RunUnderSystemdScope adds the specified pid to a systemd scope -func RunUnderSystemdScope(pid int, slice string, unitName string) error { - var properties []systemdDbus.Property - conn, err := systemdDbus.New() - if err != nil { - return err - } - properties = append(properties, systemdDbus.PropSlice(slice)) - properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) - properties = append(properties, newProp("Delegate", true)) - properties = append(properties, newProp("DefaultDependencies", false)) - ch := make(chan string) - _, err = conn.StartTransientUnit(unitName, "replace", properties, ch) - if err != nil { - return err - } - defer conn.Close() - - // Block until job is started - <-ch - - return nil -} - -func newProp(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - // ErrDetach is an error indicating that the user manually detached from the // container. 
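// A minimal sketch (illustration only, not part of this diff) of how the new
// per-platform stubs keep shared call sites compiling: pkg/util/utils_windows.go
// returns a plain error from GetRootlessRuntimeDir, so a caller built for
// windows fails at run time rather than at build time. The caller below is
// hypothetical.
package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util"
)

func main() {
	dir, err := util.GetRootlessRuntimeDir()
	if err != nil {
		// On windows this is the "not implemented" error from utils_windows.go.
		fmt.Println("no rootless runtime dir on this platform:", err)
		return
	}
	fmt.Println("rootless runtime dir:", dir)
}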
var ErrDetach = errors.New("detached from container") diff --git a/utils/utils_supported.go b/utils/utils_supported.go new file mode 100644 index 000000000..8b0ba4438 --- /dev/null +++ b/utils/utils_supported.go @@ -0,0 +1,39 @@ +// +build linux darwin + +package utils + +import ( + systemdDbus "github.com/coreos/go-systemd/dbus" + "github.com/godbus/dbus" +) + +// RunUnderSystemdScope adds the specified pid to a systemd scope +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + var properties []systemdDbus.Property + conn, err := systemdDbus.New() + if err != nil { + return err + } + properties = append(properties, systemdDbus.PropSlice(slice)) + properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) + properties = append(properties, newProp("Delegate", true)) + properties = append(properties, newProp("DefaultDependencies", false)) + ch := make(chan string) + _, err = conn.StartTransientUnit(unitName, "replace", properties, ch) + if err != nil { + return err + } + defer conn.Close() + + // Block until job is started + <-ch + + return nil +} + +func newProp(name string, units interface{}) systemdDbus.Property { + return systemdDbus.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} diff --git a/utils/utils_windows.go b/utils/utils_windows.go new file mode 100644 index 000000000..db27877d9 --- /dev/null +++ b/utils/utils_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package utils + +import "github.com/pkg/errors" + +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + return errors.New("not implemented for windows") +} diff --git a/vendor.conf b/vendor.conf index 02283beb9..ace2298aa 100644 --- a/vendor.conf +++ b/vendor.conf @@ -93,8 +93,8 @@ k8s.io/api kubernetes-1.10.13-beta.0 https://github.com/kubernetes/api k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apimachinery k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 -github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199 -github.com/containers/buildah v1.8.0 +github.com/varlink/go 64e07fabffa33e385817b41971cf2674f692f391 +github.com/containers/buildah 34e7eba408282e890e61395b6d97e58b88e14d25 # TODO: Gotty has not been updated since 2012. Can we find replacement? github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 github.com/fsouza/go-dockerclient v1.3.0 diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index b6e6545ec..e29e69383 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -26,7 +26,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.8.0" + Version = "1.9.0-dev" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go deleted file mode 100644 index 0789c2b3c..000000000 --- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go +++ /dev/null @@ -1,266 +0,0 @@ -package imagebuildah - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/containers/storage/pkg/reexec" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -const ( - symlinkChrootedCommand = "chrootsymlinks-resolve" - symlinkModifiedTime = "modtimesymlinks-resolve" - maxSymlinksResolved = 40 -) - -func init() { - reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks) - reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified) -} - -// main() for resolveSymlink()'s subprocess. -func resolveChrootedSymlinks() { - status := 0 - flag.Parse() - if len(flag.Args()) < 2 { - fmt.Fprintf(os.Stderr, "%s needs two arguments\n", symlinkChrootedCommand) - os.Exit(1) - } - // Our first parameter is the directory to chroot into. - if err := unix.Chdir(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chdir(): %v\n", err) - os.Exit(1) - } - if err := unix.Chroot(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chroot(): %v\n", err) - os.Exit(1) - } - - // Our second parameter is the path name to evaluate for symbolic links - symLink, err := getSymbolicLink(flag.Arg(1)) - if err != nil { - fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err) - os.Exit(1) - } - if _, err := os.Stdout.WriteString(symLink); err != nil { - fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err) - os.Exit(1) - } - os.Exit(status) -} - -// resolveSymlink uses a child subprocess to resolve any symlinks in filename -// in the context of rootdir. -func resolveSymlink(rootdir, filename string) (string, error) { - // The child process expects a chroot and one path that - // will be consulted relative to the chroot directory and evaluated - // for any symbolic links present. - cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename) - output, err := cmd.CombinedOutput() - if err != nil { - return "", errors.Wrapf(err, string(output)) - } - - // Hand back the resolved symlink, will be filename if a symlink is not found - return string(output), nil -} - -// main() for grandparent subprocess. Its main job is to shuttle stdio back -// and forth, managing a pseudo-terminal if we want one, for our child, the -// parent subprocess. -func resolveSymlinkTimeModified() { - status := 0 - flag.Parse() - if len(flag.Args()) < 1 { - os.Exit(1) - } - // Our first parameter is the directory to chroot into. - if err := unix.Chdir(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chdir(): %v\n", err) - os.Exit(1) - } - if err := unix.Chroot(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chroot(): %v\n", err) - os.Exit(1) - } - - // Our second parameter is the path name to evaluate for symbolic links. - // Our third parameter is the time the cached intermediate image was created. - // We check whether the modified time of the filepath we provide is after the time the cached image was created. 
- timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2)) - if err != nil { - fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err) - os.Exit(1) - } - if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil { - fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err) - os.Exit(1) - } - os.Exit(status) -} - -// resolveModifiedTime (in the grandparent process) checks filename for any symlinks, -// resolves it and compares the modified time of the file with historyTime, which is -// the creation time of the cached image. It returns true if filename was modified after -// historyTime, otherwise returns false. -func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { - // The child process expects a chroot and one path that - // will be consulted relative to the chroot directory and evaluated - // for any symbolic links present. - cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime) - output, err := cmd.CombinedOutput() - if err != nil { - return false, errors.Wrapf(err, string(output)) - } - // Hand back true/false depending on in the file was modified after the caches image was created. - return string(output) == "true", nil -} - -// modTimeIsGreater goes through the files added/copied in using the Dockerfile and -// checks the time stamp (follows symlinks) with the time stamp of when the cached -// image was created. IT compares the two and returns true if the file was modified -// after the cached image was created, otherwise it returns false. -func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { - var timeIsGreater bool - - // Convert historyTime from string to time.Time for comparison - histTime, err := time.Parse(time.RFC3339Nano, historyTime) - if err != nil { - return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime) - } - - // Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir) - relPath, err := filepath.Rel(rootdir, path) - if err != nil { - return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir) - } - - // Walk the file tree and check the time stamps. - err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error { - // If using cached images, it is possible for files that are being copied to come from - // previous build stages. But if using cached images, then the copied file won't exist - // since a container won't have been created for the previous build stage and info will be nil. - // In that case just return nil and continue on with using the cached image for the whole build process. 
- if info == nil { - return nil - } - modTime := info.ModTime() - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - // Evaluate any symlink that occurs to get updated modified information - resolvedPath, err := filepath.EvalSymlinks(path) - if err != nil && os.IsNotExist(err) { - return errors.Wrapf(errDanglingSymlink, "%q", path) - } - if err != nil { - return errors.Wrapf(err, "error evaluating symlink %q", path) - } - fileInfo, err := os.Stat(resolvedPath) - if err != nil { - return errors.Wrapf(err, "error getting file info %q", resolvedPath) - } - modTime = fileInfo.ModTime() - } - if modTime.After(histTime) { - timeIsGreater = true - return nil - } - return nil - }) - - if err != nil { - // if error is due to dangling symlink, ignore error and return nil - if errors.Cause(err) == errDanglingSymlink { - return false, nil - } - return false, errors.Wrapf(err, "error walking file tree %q", path) - } - return timeIsGreater, err -} - -// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear. -// Returns what the whole target path for what "path" resolves to. -func getSymbolicLink(path string) (string, error) { - var ( - symPath string - symLinksResolved int - ) - // Splitting path as we need to resolve each part of the path at a time - splitPath := strings.Split(path, "/") - if splitPath[0] == "" { - splitPath = splitPath[1:] - symPath = "/" - } - for _, p := range splitPath { - // If we have resolved 40 symlinks, that means something is terribly wrong - // will return an error and exit - if symLinksResolved >= maxSymlinksResolved { - return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved) - } - symPath = filepath.Join(symPath, p) - isSymlink, resolvedPath, err := hasSymlink(symPath) - if err != nil { - return "", errors.Wrapf(err, "error checking symlink for %q", symPath) - } - // if isSymlink is true, check if resolvedPath is potentially another symlink - // keep doing this till resolvedPath is not a symlink and isSymlink is false - for isSymlink == true { - // Need to keep track of number of symlinks resolved - // Will also return an error if the symlink points to itself as that will exceed maxSymlinksResolved - if symLinksResolved >= maxSymlinksResolved { - return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved) - } - isSymlink, resolvedPath, err = hasSymlink(resolvedPath) - if err != nil { - return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath) - } - symLinksResolved++ - } - // Assign resolvedPath to symPath. 
The next part of the loop will append the next part of the original path - // and continue resolving - symPath = resolvedPath - symLinksResolved++ - } - return symPath, nil -} - -// hasSymlink returns true and the target if path is symlink -// otherwise it returns false and path -func hasSymlink(path string) (bool, string, error) { - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - if err = os.MkdirAll(path, 0755); err != nil { - return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path) - } - info, err = os.Lstat(path) - if err != nil { - return false, "", errors.Wrapf(err, "error running lstat on %q", path) - } - } else { - return false, path, errors.Wrapf(err, "error get stat of path %q", path) - } - } - - // Return false and path as path if not a symlink - if info.Mode()&os.ModeSymlink != os.ModeSymlink { - return false, path, nil - } - - // Read the symlink to get what it points to - targetDir, err := os.Readlink(path) - if err != nil { - return false, "", errors.Wrapf(err, "error reading link %q", path) - } - // if the symlink points to a relative path, prepend the path till now to the resolved path - if !filepath.IsAbs(targetDir) { - targetDir = filepath.Join(filepath.Dir(path), targetDir) - } - // run filepath.Clean to remove the ".." from relative paths - return true, filepath.Clean(targetDir), nil -} diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go new file mode 100644 index 000000000..e9d745b67 --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go @@ -0,0 +1,266 @@ +package imagebuildah + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/storage/pkg/reexec" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +const ( + symlinkChrootedCommand = "chrootsymlinks-resolve" + symlinkModifiedTime = "modtimesymlinks-resolve" + maxSymlinksResolved = 40 +) + +func init() { + reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks) + reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified) +} + +// resolveSymlink uses a child subprocess to resolve any symlinks in filename +// in the context of rootdir. +func resolveSymlink(rootdir, filename string) (string, error) { + // The child process expects a chroot and one path that + // will be consulted relative to the chroot directory and evaluated + // for any symbolic links present. + cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename) + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, string(output)) + } + + // Hand back the resolved symlink, will be filename if a symlink is not found + return string(output), nil +} + +// main() for resolveSymlink()'s subprocess. +func resolveChrootedSymlinks() { + status := 0 + flag.Parse() + if len(flag.Args()) < 2 { + fmt.Fprintf(os.Stderr, "%s needs two arguments\n", symlinkChrootedCommand) + os.Exit(1) + } + // Our first parameter is the directory to chroot into. 
+ if err := unix.Chdir(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chdir(): %v\n", err) + os.Exit(1) + } + if err := unix.Chroot(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chroot(): %v\n", err) + os.Exit(1) + } + + // Our second parameter is the path name to evaluate for symbolic links + symLink, err := getSymbolicLink(flag.Arg(1)) + if err != nil { + fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err) + os.Exit(1) + } + if _, err := os.Stdout.WriteString(symLink); err != nil { + fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err) + os.Exit(1) + } + os.Exit(status) +} + +// main() for grandparent subprocess. Its main job is to shuttle stdio back +// and forth, managing a pseudo-terminal if we want one, for our child, the +// parent subprocess. +func resolveSymlinkTimeModified() { + status := 0 + flag.Parse() + if len(flag.Args()) < 1 { + os.Exit(1) + } + // Our first parameter is the directory to chroot into. + if err := unix.Chdir(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chdir(): %v\n", err) + os.Exit(1) + } + if err := unix.Chroot(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chroot(): %v\n", err) + os.Exit(1) + } + + // Our second parameter is the path name to evaluate for symbolic links. + // Our third parameter is the time the cached intermediate image was created. + // We check whether the modified time of the filepath we provide is after the time the cached image was created. + timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2)) + if err != nil { + fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err) + os.Exit(1) + } + if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil { + fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err) + os.Exit(1) + } + os.Exit(status) +} + +// resolveModifiedTime (in the grandparent process) checks filename for any symlinks, +// resolves it and compares the modified time of the file with historyTime, which is +// the creation time of the cached image. It returns true if filename was modified after +// historyTime, otherwise returns false. +func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { + // The child process expects a chroot and one path that + // will be consulted relative to the chroot directory and evaluated + // for any symbolic links present. + cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime) + output, err := cmd.CombinedOutput() + if err != nil { + return false, errors.Wrapf(err, string(output)) + } + // Hand back true/false depending on in the file was modified after the caches image was created. + return string(output) == "true", nil +} + +// modTimeIsGreater goes through the files added/copied in using the Dockerfile and +// checks the time stamp (follows symlinks) with the time stamp of when the cached +// image was created. IT compares the two and returns true if the file was modified +// after the cached image was created, otherwise it returns false. 
+func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { + var timeIsGreater bool + + // Convert historyTime from string to time.Time for comparison + histTime, err := time.Parse(time.RFC3339Nano, historyTime) + if err != nil { + return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime) + } + + // Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir) + relPath, err := filepath.Rel(rootdir, path) + if err != nil { + return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir) + } + + // Walk the file tree and check the time stamps. + err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error { + // If using cached images, it is possible for files that are being copied to come from + // previous build stages. But if using cached images, then the copied file won't exist + // since a container won't have been created for the previous build stage and info will be nil. + // In that case just return nil and continue on with using the cached image for the whole build process. + if info == nil { + return nil + } + modTime := info.ModTime() + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + // Evaluate any symlink that occurs to get updated modified information + resolvedPath, err := filepath.EvalSymlinks(path) + if err != nil && os.IsNotExist(err) { + return errors.Wrapf(errDanglingSymlink, "%q", path) + } + if err != nil { + return errors.Wrapf(err, "error evaluating symlink %q", path) + } + fileInfo, err := os.Stat(resolvedPath) + if err != nil { + return errors.Wrapf(err, "error getting file info %q", resolvedPath) + } + modTime = fileInfo.ModTime() + } + if modTime.After(histTime) { + timeIsGreater = true + return nil + } + return nil + }) + + if err != nil { + // if error is due to dangling symlink, ignore error and return nil + if errors.Cause(err) == errDanglingSymlink { + return false, nil + } + return false, errors.Wrapf(err, "error walking file tree %q", path) + } + return timeIsGreater, err +} + +// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear. +// Returns what the whole target path for what "path" resolves to. 
+func getSymbolicLink(path string) (string, error) { + var ( + symPath string + symLinksResolved int + ) + // Splitting path as we need to resolve each part of the path at a time + splitPath := strings.Split(path, "/") + if splitPath[0] == "" { + splitPath = splitPath[1:] + symPath = "/" + } + for _, p := range splitPath { + // If we have resolved 40 symlinks, that means something is terribly wrong + // will return an error and exit + if symLinksResolved >= maxSymlinksResolved { + return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved) + } + symPath = filepath.Join(symPath, p) + isSymlink, resolvedPath, err := hasSymlink(symPath) + if err != nil { + return "", errors.Wrapf(err, "error checking symlink for %q", symPath) + } + // if isSymlink is true, check if resolvedPath is potentially another symlink + // keep doing this till resolvedPath is not a symlink and isSymlink is false + for isSymlink == true { + // Need to keep track of number of symlinks resolved + // Will also return an error if the symlink points to itself as that will exceed maxSymlinksResolved + if symLinksResolved >= maxSymlinksResolved { + return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved) + } + isSymlink, resolvedPath, err = hasSymlink(resolvedPath) + if err != nil { + return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath) + } + symLinksResolved++ + } + // Assign resolvedPath to symPath. The next part of the loop will append the next part of the original path + // and continue resolving + symPath = resolvedPath + symLinksResolved++ + } + return symPath, nil +} + +// hasSymlink returns true and the target if path is symlink +// otherwise it returns false and path +func hasSymlink(path string) (bool, string, error) { + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(path, 0755); err != nil { + return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path) + } + info, err = os.Lstat(path) + if err != nil { + return false, "", errors.Wrapf(err, "error running lstat on %q", path) + } + } else { + return false, path, errors.Wrapf(err, "error get stat of path %q", path) + } + } + + // Return false and path as path if not a symlink + if info.Mode()&os.ModeSymlink != os.ModeSymlink { + return false, path, nil + } + + // Read the symlink to get what it points to + targetDir, err := os.Readlink(path) + if err != nil { + return false, "", errors.Wrapf(err, "error reading link %q", path) + } + // if the symlink points to a relative path, prepend the path till now to the resolved path + if !filepath.IsAbs(targetDir) { + targetDir = filepath.Join(filepath.Dir(path), targetDir) + } + // run filepath.Clean to remove the ".." 
from relative paths + return true, filepath.Clean(targetDir), nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go new file mode 100644 index 000000000..2cec4fe21 --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package imagebuildah + +import "github.com/pkg/errors" + +func resolveSymlink(rootdir, filename string) (string, error) { + return "", errors.New("function not supported on non-linux systems") +} + +func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { + return false, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index cc85136fd..bec41f3ae 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -22,7 +22,6 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sys/unix" ) const ( @@ -39,14 +38,9 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) { memorySwap int64 err error ) - rlim := unix.Rlimit{Cur: 1048576, Max: 1048576} - defaultLimits := []string{} - if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max)) - } - if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) - } + + defaultLimits := getDefaultProcessLimits() + memVal, _ := c.Flags().GetString("memory") if memVal != "" { memoryLimit, err = units.RAMInBytes(memVal) diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go new file mode 100644 index 000000000..d056c1bb3 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -0,0 +1,20 @@ +// +build linux darwin + +package parse + +import ( + "fmt" + "golang.org/x/sys/unix" +) + +func getDefaultProcessLimits() []string { + rlim := unix.Rlimit{Cur: 1048576, Max: 1048576} + defaultLimits := []string{} + if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max)) + } + if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) + } + return defaultLimits +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go new file mode 100644 index 000000000..7e970624f --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin + +package parse + +func getDefaultProcessLimits() []string { + return []string{} +} diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 00eac8e39..88900b6b7 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -1,44 +1,10 @@ package buildah import ( - "bytes" - "context" - "encoding/json" "fmt" "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" 
- "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" - "github.com/containernetworking/cni/libcni" - "github.com/containers/buildah/bind" - "github.com/containers/buildah/chroot" - "github.com/containers/buildah/pkg/secrets" - "github.com/containers/buildah/pkg/unshare" - "github.com/containers/buildah/util" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/reexec" - "github.com/containers/storage/pkg/stringid" - units "github.com/docker/go-units" - "github.com/docker/libnetwork/resolvconf" - "github.com/docker/libnetwork/types" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/runtime-tools/generate" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sys/unix" + "github.com/opencontainers/runtime-spec/specs-go" ) const ( @@ -203,34 +169,6 @@ type RunOptions struct { DropCapabilities []string } -// DefaultNamespaceOptions returns the default namespace settings from the -// runtime-tools generator library. -func DefaultNamespaceOptions() (NamespaceOptions, error) { - options := NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: true}, - {Name: string(specs.IPCNamespace), Host: true}, - {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: true}, - {Name: string(specs.PIDNamespace), Host: true}, - {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: true}, - } - g, err := generate.New("linux") - if err != nil { - return options, errors.Wrapf(err, "error generating new 'linux' runtime spec") - } - spec := g.Config - if spec.Linux != nil { - for _, ns := range spec.Linux.Namespaces { - options.AddOrReplace(NamespaceOption{ - Name: string(ns.Type), - Path: ns.Path, - }) - } - } - return options, nil -} - // Find the configuration for the namespace of the given type. If there are // duplicates, find the _last_ one of the type, since we assume it was appended // more recently. 
@@ -258,1959 +196,3 @@ nextOption: *n = append(*n, option) } } - -func addRlimits(ulimit []string, g *generate.Generator) error { - var ( - ul *units.Ulimit - err error - ) - - for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { - return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) - } - - g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) - } - return nil -} - -func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { - // Resources - CPU - if commonOpts.CPUPeriod != 0 { - g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) - } - if commonOpts.CPUQuota != 0 { - g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota) - } - if commonOpts.CPUShares != 0 { - g.SetLinuxResourcesCPUShares(commonOpts.CPUShares) - } - if commonOpts.CPUSetCPUs != "" { - g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs) - } - if commonOpts.CPUSetMems != "" { - g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) - } - - // Resources - Memory - if commonOpts.Memory != 0 { - g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) - } - if commonOpts.MemorySwap != 0 { - g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) - } - - // cgroup membership - if commonOpts.CgroupParent != "" { - g.SetLinuxCgroupsPath(commonOpts.CgroupParent) - } - - // Other process resource limits - if err := addRlimits(commonOpts.Ulimit, g); err != nil { - return err - } - - logrus.Debugf("Resources: %#v", commonOpts) - return nil -} - -func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions NamespaceOptions) error { - // Start building a new list of mounts. - var mounts []specs.Mount - haveMount := func(destination string) bool { - for _, mount := range mounts { - if mount.Destination == destination { - // Already have something to mount there. - return true - } - } - return false - } - - ipc := namespaceOptions.Find(string(specs.IPCNamespace)) - hostIPC := ipc == nil || ipc.Host - net := namespaceOptions.Find(string(specs.NetworkNamespace)) - hostNetwork := net == nil || net.Host - user := namespaceOptions.Find(string(specs.UserNamespace)) - hostUser := user == nil || user.Host - - // Copy mounts from the generated list. - mountCgroups := true - specMounts := []specs.Mount{} - for _, specMount := range spec.Mounts { - // Override some of the mounts from the generated list if we're doing different things with namespaces. 
- if specMount.Destination == "/dev/shm" { - specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize} - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/shm is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/shm", - Type: "bind", - Destination: "/dev/shm", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/dev/mqueue" { - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/mqueue is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/mqueue", - Type: "bind", - Destination: "/dev/mqueue", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/sys" { - if hostNetwork && !hostUser { - mountCgroups = false - if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/sys is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/sys", - Type: "bind", - Destination: "/sys", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, - } - } - } - specMounts = append(specMounts, specMount) - } - - // Add a mount for the cgroups filesystem, unless we're already - // recursively bind mounting all of /sys, in which case we shouldn't - // bother with it. - sysfsMount := []specs.Mount{} - if mountCgroups { - sysfsMount = []specs.Mount{{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, - }} - } - - // Get the list of files we need to bind into the container. - bindFileMounts, err := runSetupBoundFiles(bundlePath, bindFiles) - if err != nil { - return err - } - - // After this point we need to know the per-container persistent storage directory. - cdir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) - } - - // Figure out which UID and GID to tell the secrets package to use - // for files that it creates. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err - } - - // Get the list of secrets mounts. - secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless()) - - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - copyWithTar := b.copyWithTar(nil, nil) - builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID)) - if err != nil { - return err - } - - // Get the list of explicitly-specified volume mounts. - volumes, err := runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts) - if err != nil { - return err - } - - // Add them all, in the preferred order, except where they conflict with something that was previously added. - for _, mount := range append(append(append(append(append(volumes, builtins...), secretMounts...), bindFileMounts...), specMounts...), sysfsMount...) { - if haveMount(mount.Destination) { - // Already mounting something there, no need to bother with this one. - continue - } - // Add the mount. 
- mounts = append(mounts, mount) - } - - // Set the list in the spec. - spec.Mounts = mounts - return nil -} - -func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount, err error) { - for dest, src := range bindFiles { - options := []string{"rbind"} - if strings.HasPrefix(src, bundlePath) { - options = append(options, bind.NoBindOption) - } - mounts = append(mounts, specs.Mount{ - Source: src, - Destination: dest, - Type: "bind", - Options: options, - }) - } - return mounts, nil -} - -func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) { - var mounts []specs.Mount - hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID} - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - for _, volume := range builtinVolumes { - subdir := digest.Canonical.FromString(volume).Hex() - volumePath := filepath.Join(containerDir, "buildah-volumes", subdir) - srcPath := filepath.Join(mountPoint, volume) - initializeVolume := false - // If we need to, initialize the volume path's initial contents. - if _, err := os.Stat(volumePath); err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume) - } - logrus.Debugf("setting up built-in volume at %q", volumePath) - if err = os.MkdirAll(volumePath, 0755); err != nil { - return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume) - } - if err = label.Relabel(volumePath, mountLabel, false); err != nil { - return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume) - } - initializeVolume = true - } - stat, err := os.Stat(srcPath) - if err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume) - } - if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil { - return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume) - } - if stat, err = os.Stat(srcPath); err != nil { - return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume) - } - } - if initializeVolume { - if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil { - return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume) - } - if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil { - return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume) - } - if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) { - return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) - } - } - // Add the bind mount. 
- mounts = append(mounts, specs.Mount{ - Source: volumePath, - Destination: volume, - Type: "bind", - Options: []string{"bind"}, - }) - } - return mounts, nil -} - -func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount) ([]specs.Mount, error) { - var mounts []specs.Mount - - parseMount := func(host, container string, options []string) (specs.Mount, error) { - var foundrw, foundro, foundz, foundZ bool - var rootProp string - for _, opt := range options { - switch opt { - case "rw": - foundrw = true - case "ro": - foundro = true - case "z": - foundz = true - case "Z": - foundZ = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared": - rootProp = opt - } - } - if !foundrw && !foundro { - options = append(options, "rw") - } - if foundz { - if err := label.Relabel(host, mountLabel, true); err != nil { - return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) - } - } - if foundZ { - if err := label.Relabel(host, mountLabel, false); err != nil { - return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) - } - } - if rootProp == "" { - options = append(options, "private") - } - return specs.Mount{ - Destination: container, - Type: "bind", - Source: host, - Options: options, - }, nil - } - // Bind mount volumes specified for this particular Run() invocation - for _, i := range optionMounts { - logrus.Debugf("setting up mounted volume at %q", i.Destination) - mount, err := parseMount(i.Source, i.Destination, append(i.Options, "rbind")) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - // Bind mount volumes given by the user when the container was created - for _, i := range volumeMounts { - var options []string - spliti := strings.Split(i, ":") - if len(spliti) > 2 { - options = strings.Split(spliti[2], ",") - } - options = append(options, "rbind") - mount, err := parseMount(spliti[0], spliti[1], options) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - return mounts, nil -} - -// addNetworkConfig copies files from host and sets them up to bind mount into container -func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string) (string, error) { - stat, err := os.Stat(hostPath) - if err != nil { - return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID) - } - contents, err := ioutil.ReadFile(hostPath) - if err != nil { - return "", errors.Wrapf(err, "unable to read %s", hostPath) - } - - search := resolvconf.GetSearchDomains(contents) - nameservers := resolvconf.GetNameservers(contents, types.IP) - options := resolvconf.GetOptions(contents) - - if len(dnsSearch) > 0 { - search = dnsSearch - } - if len(dnsServers) != 0 { - dns, err := getDNSIP(dnsServers) - if err != nil { - return "", errors.Wrapf(err, "error getting dns servers") - } - nameservers = []string{} - for _, server := range dns { - nameservers = append(nameservers, server.String()) - } - } - - if len(dnsOptions) != 0 { - options = dnsOptions - } - - cfile := filepath.Join(rdir, filepath.Base(hostPath)) - if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil { - return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID) - } - - uid := int(stat.Sys().(*syscall.Stat_t).Uid) - gid := int(stat.Sys().(*syscall.Stat_t).Gid) - if chownOpts != nil { - uid = chownOpts.UID - gid = chownOpts.GID - } - if err = os.Chown(cfile, uid, gid); err != nil { - 
return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID) - } - - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { - return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) - } - - return cfile, nil -} - -func getDNSIP(dnsServers []string) (dns []net.IP, err error) { - for _, i := range dnsServers { - result := net.ParseIP(i) - if result == nil { - return dns, errors.Errorf("invalid IP address %s", i) - } - dns = append(dns, result) - } - return dns, nil -} - -// generateHosts creates a containers hosts file -func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) { - hostPath := "/etc/hosts" - stat, err := os.Stat(hostPath) - if err != nil { - return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID) - } - - hosts := bytes.NewBufferString("# Generated by Buildah\n") - orig, err := ioutil.ReadFile(hostPath) - if err != nil { - return "", errors.Wrapf(err, "unable to read %s", hostPath) - } - hosts.Write(orig) - for _, host := range addHosts { - // verify the host format - values := strings.SplitN(host, ":", 2) - if len(values) != 2 { - return "", errors.Errorf("unable to parse host entry %q: incorrect format", host) - } - if values[0] == "" { - return "", errors.Errorf("hostname in host entry %q is empty", host) - } - if values[1] == "" { - return "", errors.Errorf("IP address in host entry %q is empty", host) - } - hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) - } - - if hostname != "" { - hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s\n", hostname))) - hosts.Write([]byte(fmt.Sprintf("::1 %s\n", hostname))) - } - cfile := filepath.Join(rdir, filepath.Base(hostPath)) - if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { - return "", errors.Wrapf(err, "error writing /etc/hosts into the container") - } - uid := int(stat.Sys().(*syscall.Stat_t).Uid) - gid := int(stat.Sys().(*syscall.Stat_t).Gid) - if chownOpts != nil { - uid = chownOpts.UID - gid = chownOpts.GID - } - if err = os.Chown(cfile, uid, gid); err != nil { - return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID) - } - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { - return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) - } - - return cfile, nil -} - -func setupMaskedPaths(g *generate.Generator) { - for _, mp := range []string{ - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - } { - g.AddLinuxMaskedPaths(mp) - } -} - -func setupReadOnlyPaths(g *generate.Generator) { - for _, rp := range []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - } { - g.AddLinuxReadonlyPaths(rp) - } -} - -func setupCapAdd(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.AddProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) - } - if err := g.AddProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the effective capability set", cap) - } - if err := g.AddProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) - } - if 
err := g.AddProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) - } - if err := g.AddProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) - } - } - return nil -} - -func setupCapDrop(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.DropProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) - } - if err := g.DropProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the effective capability set", cap) - } - if err := g.DropProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) - } - if err := g.DropProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) - } - if err := g.DropProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) - } - } - return nil -} - -func setupCapabilities(g *generate.Generator, firstAdds, firstDrops, secondAdds, secondDrops []string) error { - g.ClearProcessCapabilities() - if err := setupCapAdd(g, util.DefaultCapabilities...); err != nil { - return err - } - if err := setupCapAdd(g, firstAdds...); err != nil { - return err - } - if err := setupCapDrop(g, firstDrops...); err != nil { - return err - } - if err := setupCapAdd(g, secondAdds...); err != nil { - return err - } - return setupCapDrop(g, secondDrops...) -} - -func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { - switch terminalPolicy { - case DefaultTerminal: - onTerminal := terminal.IsTerminal(unix.Stdin) && terminal.IsTerminal(unix.Stdout) && terminal.IsTerminal(unix.Stderr) - if onTerminal { - logrus.Debugf("stdio is a terminal, defaulting to using a terminal") - } else { - logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") - } - g.SetProcessTerminal(onTerminal) - case WithTerminal: - g.SetProcessTerminal(true) - case WithoutTerminal: - g.SetProcessTerminal(false) - } - if terminalSize != nil { - g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) - } -} - -func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, idmapOptions IDMappingOptions, policy NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { - // Set namespace options in the container configuration. 
- configureUserns := false - specifiedNetwork := false - for _, namespaceOption := range namespaceOptions { - switch namespaceOption.Name { - case string(specs.UserNamespace): - configureUserns = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUserns = true - } - case string(specs.NetworkNamespace): - specifiedNetwork = true - configureNetwork = false - if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { - if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { - configureNetworks = strings.Split(namespaceOption.Path, ",") - namespaceOption.Path = "" - } - configureNetwork = (policy != NetworkDisabled) - } - case string(specs.UTSNamespace): - configureUTS = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUTS = true - } - } - if namespaceOption.Host { - if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) - } - } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { - if namespaceOption.Path == "" { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) - } - return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) - } - } - - // If we've got mappings, we're going to have to create a user namespace. - if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { - if err := g.AddOrReplaceLinuxNamespace(specs.UserNamespace, ""); err != nil { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace)) - } - hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") - if err != nil { - return false, nil, false, err - } - for _, m := range idmapOptions.UIDMap { - g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) - } - if len(idmapOptions.UIDMap) == 0 { - for _, m := range hostUidmap { - g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size) - } - } - for _, m := range idmapOptions.GIDMap { - g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) - } - if len(idmapOptions.GIDMap) == 0 { - for _, m := range hostGidmap { - g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size) - } - } - if !specifiedNetwork { - if err := g.AddOrReplaceLinuxNamespace(specs.NetworkNamespace, ""); err != nil { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace)) - } - configureNetwork = (policy != NetworkDisabled) - } - } else { - if err := g.RemoveLinuxNamespace(specs.UserNamespace); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace)) - } - if !specifiedNetwork { - if err := g.RemoveLinuxNamespace(specs.NetworkNamespace); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace)) - } - } - } - if configureNetwork { - for name, val := range util.DefaultNetworkSysctl { - g.AddLinuxSysctl(name, val) - } - } - return configureNetwork, configureNetworks, configureUTS, nil -} - -// Search for a command that isn't given as an absolute path using the $PATH -// under the rootfs. 
We can't resolve absolute symbolic links without -// chroot()ing, which we may not be able to do, so just accept a link as a -// valid resolution. -func runLookupPath(g *generate.Generator, command []string) []string { - // Look for the configured $PATH. - spec := g.Config - envPath := "" - for i := range spec.Process.Env { - if strings.HasPrefix(spec.Process.Env[i], "PATH=") { - envPath = spec.Process.Env[i] - } - } - // If there is no configured $PATH, supply one. - if envPath == "" { - defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" - envPath = "PATH=" + defaultPath - g.AddProcessEnv("PATH", defaultPath) - } - // No command, nothing to do. - if len(command) == 0 { - return command - } - // Command is already an absolute path, use it as-is. - if filepath.IsAbs(command[0]) { - return command - } - // For each element in the PATH, - for _, pathEntry := range filepath.SplitList(envPath[5:]) { - // if it's the empty string, it's ".", which is the Cwd, - if pathEntry == "" { - pathEntry = spec.Process.Cwd - } - // build the absolute path which it might be, - candidate := filepath.Join(pathEntry, command[0]) - // check if it's there, - if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { - // and if it's not a directory, and either a symlink or executable, - if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { - // use that. - return append([]string{candidate}, command[1:]...) - } - } - } - return command -} - -func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) error { - // Set the user UID/GID/supplemental group list/capabilities lists. - user, err := b.user(mountPoint, options.User) - if err != nil { - return err - } - if err := setupCapabilities(g, b.AddCapabilities, b.DropCapabilities, options.AddCapabilities, options.DropCapabilities); err != nil { - return err - } - g.SetProcessUID(user.UID) - g.SetProcessGID(user.GID) - for _, gid := range user.AdditionalGids { - g.AddProcessAdditionalGid(gid) - } - - // Remove capabilities if not running as root except Bounding set - if user.UID != 0 { - bounding := g.Config.Process.Capabilities.Bounding - g.ClearProcessCapabilities() - g.Config.Process.Capabilities.Bounding = bounding - } - - return nil -} - -func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) { - g.ClearProcessEnv() - if b.CommonBuildOpts.HTTPProxy { - for _, envSpec := range []string{ - "http_proxy", - "HTTP_PROXY", - "https_proxy", - "HTTPS_PROXY", - "ftp_proxy", - "FTP_PROXY", - "no_proxy", - "NO_PROXY", - } { - envVal := os.Getenv(envSpec) - if envVal != "" { - g.AddProcessEnv(envSpec, envVal) - } - } - } - - for _, envSpec := range append(b.Env(), options.Env...) { - env := strings.SplitN(envSpec, "=", 2) - if len(env) > 1 { - g.AddProcessEnv(env[0], env[1]) - } - } - - for src, dest := range b.Args { - g.AddProcessEnv(src, dest) - } -} - -func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) { - defaultNamespaceOptions, err := DefaultNamespaceOptions() - if err != nil { - return false, nil, err - } - - namespaceOptions := defaultNamespaceOptions - namespaceOptions.AddOrReplace(b.NamespaceOptions...) - namespaceOptions.AddOrReplace(options.NamespaceOptions...) 
- - networkPolicy := options.ConfigureNetwork - if networkPolicy == NetworkDefault { - networkPolicy = b.ConfigureNetwork - } - - configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(g, namespaceOptions, b.IDMappingOptions, networkPolicy) - if err != nil { - return false, nil, err - } - - if configureUTS { - if options.Hostname != "" { - g.SetHostname(options.Hostname) - } else if b.Hostname() != "" { - g.SetHostname(b.Hostname()) - } else { - g.SetHostname(stringid.TruncateID(b.ContainerID)) - } - } else { - g.SetHostname("") - } - - found := false - spec := g.Config - for i := range spec.Process.Env { - if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { - found = true - break - } - } - if !found { - spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) - } - - return configureNetwork, configureNetworks, nil -} - -// Run runs the specified command in the container's root filesystem. -func (b *Builder) Run(command []string, options RunOptions) error { - p, err := ioutil.TempDir("", Package) - if err != nil { - return errors.Wrapf(err, "run: error creating temporary directory under %q", os.TempDir()) - } - // On some hosts like AH, /tmp is a symlink and we need an - // absolute path. - path, err := filepath.EvalSymlinks(p) - if err != nil { - return errors.Wrapf(err, "run: error evaluating %q for symbolic links", p) - } - logrus.Debugf("using %q to hold bundle data", path) - defer func() { - if err2 := os.RemoveAll(path); err2 != nil { - logrus.Errorf("error removing %q: %v", path, err2) - } - }() - - gp, err := generate.New("linux") - if err != nil { - return errors.Wrapf(err, "error generating new 'linux' runtime spec") - } - g := &gp - - isolation := options.Isolation - if isolation == IsolationDefault { - isolation = b.Isolation - if isolation == IsolationDefault { - isolation = IsolationOCI - } - } - if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil { - return err - } - - b.configureEnvironment(g, options) - - if b.CommonBuildOpts == nil { - return errors.Errorf("Invalid format on container you must recreate the container") - } - - if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { - return err - } - - if options.WorkingDir != "" { - g.SetProcessCwd(options.WorkingDir) - } else if b.WorkDir() != "" { - g.SetProcessCwd(b.WorkDir()) - } - setupSelinux(g, b.ProcessLabel, b.MountLabel) - mountPoint, err := b.Mount(b.MountLabel) - if err != nil { - return errors.Wrapf(err, "error mounting container %q", b.ContainerID) - } - defer func() { - if err := b.Unmount(); err != nil { - logrus.Errorf("error unmounting container: %v", err) - } - }() - g.SetRootPath(mountPoint) - if len(command) > 0 { - command = runLookupPath(g, command) - g.SetProcessArgs(command) - } else { - g.SetProcessArgs(nil) - } - - setupMaskedPaths(g) - setupReadOnlyPaths(g) - - setupTerminal(g, options.Terminal, options.TerminalSize) - - configureNetwork, configureNetworks, err := b.configureNamespaces(g, options) - if err != nil { - return err - } - - if err := b.configureUIDGID(g, mountPoint, options); err != nil { - return err - } - - g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) - - // Now grab the spec from the generator. Set the generator to nil so that future contributors - // will quickly be able to tell that they're supposed to be modifying the spec directly from here. 
- spec := g.Config - g = nil - - logrus.Debugf("ensuring working directory %q exists", filepath.Join(mountPoint, spec.Process.Cwd)) - if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil { - return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd) - } - - // Set the seccomp configuration using the specified profile name. Some syscalls are - // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot), - // so we sorted out the capabilities lists first. - if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil { - return err - } - - // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err - } - rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} - - bindFiles := make(map[string]string) - namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) - volumes := b.Volumes() - - if !contains(volumes, "/etc/hosts") { - hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) - if err != nil { - return err - } - bindFiles["/etc/hosts"] = hostFile - } - - if !contains(volumes, "/etc/resolv.conf") { - resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions) - if err != nil { - return err - } - bindFiles["/etc/resolv.conf"] = resolvFile - } - - err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions) - if err != nil { - return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) - } - - if options.CNIConfigDir == "" { - options.CNIConfigDir = b.CNIConfigDir - if b.CNIConfigDir == "" { - options.CNIConfigDir = util.DefaultCNIConfigDir - } - } - if options.CNIPluginPath == "" { - options.CNIPluginPath = b.CNIPluginPath - if b.CNIPluginPath == "" { - options.CNIPluginPath = util.DefaultCNIPluginPath - } - } - - switch isolation { - case IsolationOCI: - var moreCreateArgs []string - if options.NoPivot { - moreCreateArgs = []string{"--no-pivot"} - } else { - moreCreateArgs = nil - } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path)) - case IsolationChroot: - err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr) - case IsolationOCIRootless: - moreCreateArgs := []string{"--no-new-keyring"} - if options.NoPivot { - moreCreateArgs = append(moreCreateArgs, "--no-pivot") - } - if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil { - return err - } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path)) - default: - err = errors.Errorf("don't know how to run this command") - } - return err -} - -func contains(volumes []string, v string) bool { - for _, i := range volumes { - if i == v { - return true - } - } - return false -} - -func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error { - switch isolation { - case IsolationOCIRootless: - if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of an IPC namespace.") - } - 
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)}) - _, err := exec.LookPath("slirp4netns") - hostNetworking := err != nil - networkNamespacePath := "" - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil { - hostNetworking = ns.Host - networkNamespacePath = ns.Path - if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) { - logrus.Debugf("Disabling network namespace configuration.") - networkNamespacePath = "" - } - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: hostNetworking, - Path: networkNamespacePath, - }) - if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a PID namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) - if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a user namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UserNamespace)}) - if ns := options.NamespaceOptions.Find(string(specs.UTSNamespace)); ns != nil && !ns.Host { - logrus.Debugf("Disabling UTS namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UTSNamespace), Host: true}) - case IsolationOCI: - pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) - userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) - if (pidns == nil || pidns.Host) && (userns != nil && !userns.Host) { - return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") - } - } - return nil -} - -func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32) error { - spec.Hostname = "" - spec.Process.User.AdditionalGids = nil - spec.Linux.Resources = nil - - emptyDir := filepath.Join(bundleDir, "empty") - if err := os.Mkdir(emptyDir, 0); err != nil { - return errors.Wrapf(err, "error creating %q", emptyDir) - } - - // Replace /sys with a read-only bind mount. - mounts := []specs.Mount{ - { - Source: "/dev", - Destination: "/dev", - Type: "tmpfs", - Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, - }, - { - Source: "mqueue", - Destination: "/dev/mqueue", - Type: "mqueue", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "pts", - Destination: "/dev/pts", - Type: "devpts", - Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, - }, - { - Source: "shm", - Destination: "/dev/shm", - Type: "tmpfs", - Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=65536k"}, - }, - { - Source: "/proc", - Destination: "/proc", - Type: "proc", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "/sys", - Destination: "/sys", - Type: "bind", - Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, - }, - } - // Cover up /sys/fs/cgroup and /sys/fs/selinux, if they exist in our source for /sys. - if _, err := os.Stat("/sys/fs/cgroup"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") - } - if _, err := os.Stat("/sys/fs/selinux"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux") - } - // Keep anything that isn't under /dev, /proc, or /sys. 
- for i := range spec.Mounts { - if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") || - spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") || - spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") { - continue - } - mounts = append(mounts, spec.Mounts[i]) - } - spec.Mounts = mounts - return nil -} - -type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - ConfigureNetworks []string - MoreCreateArgs []string - ContainerName string - Isolation Isolation -} - -func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { - var confwg sync.WaitGroup - config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ - Options: options, - Spec: spec, - RootPath: rootPath, - BundlePath: bundlePath, - ConfigureNetwork: configureNetwork, - ConfigureNetworks: configureNetworks, - MoreCreateArgs: moreCreateArgs, - ContainerName: containerName, - Isolation: isolation, - }) - if conferr != nil { - return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) - } - cmd := reexec.Command(runUsingRuntimeCommand) - cmd.Dir = bundlePath - cmd.Stdin = options.Stdin - if cmd.Stdin == nil { - cmd.Stdin = os.Stdin - } - cmd.Stdout = options.Stdout - if cmd.Stdout == nil { - cmd.Stdout = os.Stdout - } - cmd.Stderr = options.Stderr - if cmd.Stderr == nil { - cmd.Stderr = os.Stderr - } - cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())) - preader, pwriter, err := os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating configuration pipe") - } - confwg.Add(1) - go func() { - _, conferr = io.Copy(pwriter, bytes.NewReader(config)) - if conferr != nil { - conferr = errors.Wrapf(conferr, "error while copying configuration down pipe to child process") - } - confwg.Done() - }() - cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) - defer preader.Close() - defer pwriter.Close() - err = cmd.Run() - if err != nil { - err = errors.Wrapf(err, "error while running runtime") - } - confwg.Wait() - if err == nil { - return conferr - } - if conferr != nil { - logrus.Debugf("%v", conferr) - } - return err -} - -func init() { - reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) -} - -func runUsingRuntimeMain() { - var options runUsingRuntimeSubprocOptions - // Set logging. - if level := os.Getenv("LOGLEVEL"); level != "" { - if ll, err := strconv.Atoi(level); err == nil { - logrus.SetLevel(logrus.Level(ll)) - } - } - // Unpack our configuration. - confPipe := os.NewFile(3, "confpipe") - if confPipe == nil { - fmt.Fprintf(os.Stderr, "error reading options pipe\n") - os.Exit(1) - } - defer confPipe.Close() - if err := json.NewDecoder(confPipe).Decode(&options); err != nil { - fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) - os.Exit(1) - } - // Set ourselves up to read the container's exit status. We're doing this in a child process - // so that we won't mess with the setting in a caller of the library. This stubs to OS specific - // calls - if err := setChildProcess(); err != nil { - os.Exit(1) - } - // Run the container, start to finish. 
- status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName) - if err != nil { - fmt.Fprintf(os.Stderr, "error running container: %v\n", err) - os.Exit(1) - } - // Pass the container's exit status back to the caller by exiting with the same status. - if status.Exited() { - os.Exit(status.ExitStatus()) - } else if status.Signaled() { - fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) - os.Exit(1) - } - os.Exit(1) -} - -func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { - // Lock the caller to a single OS-level thread. - runtime.LockOSThread() - - // Set up bind mounts for things that a namespaced user might not be able to get to directly. - unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) - if unmountAll != nil { - defer func() { - if err := unmountAll(); err != nil { - logrus.Error(err) - } - }() - } - if err != nil { - return 1, err - } - - // Write the runtime configuration. - specbytes, err := json.Marshal(spec) - if err != nil { - return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec) - } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { - return 1, errors.Wrapf(err, "error storing runtime configuration in %q", filepath.Join(bundlePath, "config.json")) - } - - logrus.Debugf("config = %v", string(specbytes)) - - // Decide which runtime to use. - runtime := options.Runtime - if runtime == "" { - runtime = util.Runtime() - } - - // Default to just passing down our stdio. - getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, os.Stdout, os.Stderr - } - - // Figure out how we're doing stdio handling, and create pipes and sockets. - var stdio sync.WaitGroup - var consoleListener *net.UnixListener - var errorFds, closeBeforeReadingErrorFds []int - stdioPipe := make([][]int, 3) - copyConsole := false - copyPipes := false - finishCopy := make([]int, 2) - if err = unix.Pipe(finishCopy); err != nil { - return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") - } - finishedCopy := make(chan struct{}) - if spec.Process != nil { - if spec.Process.Terminal { - copyConsole = true - // Create a listening socket for accepting the container's terminal's PTY master. - socketPath := filepath.Join(bundlePath, "console.sock") - consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) - if err != nil { - return 1, errors.Wrapf(err, "error creating socket %q to receive terminal descriptor", consoleListener.Addr()) - } - // Add console socket arguments. - moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) - } else { - copyPipes = true - // Figure out who should own the pipes. - uid, gid, err := util.GetHostRootIDs(spec) - if err != nil { - return 1, err - } - // Create stdio pipes. - if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { - return 1, err - } - errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} - closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} - // Set stdio to our pipes. 
- getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") - stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") - stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") - return stdin, stdout, stderr - } - } - } else { - if options.Quiet { - // Discard stdout. - getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, nil, os.Stderr - } - } - } - - // Build the commands that we'll execute. - pidFile := filepath.Join(bundlePath, "pid") - args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) - create := exec.Command(runtime, args...) - create.Dir = bundlePath - stdin, stdout, stderr := getCreateStdio() - create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr - if create.SysProcAttr == nil { - create.SysProcAttr = &syscall.SysProcAttr{} - } - - args = append(options.Args, "start", containerName) - start := exec.Command(runtime, args...) - start.Dir = bundlePath - start.Stderr = os.Stderr - - args = append(options.Args, "kill", containerName) - kill := exec.Command(runtime, args...) - kill.Dir = bundlePath - kill.Stderr = os.Stderr - - args = append(options.Args, "delete", containerName) - del := exec.Command(runtime, args...) - del.Dir = bundlePath - del.Stderr = os.Stderr - - // Actually create the container. - logrus.Debugf("Running %q", create.Args) - err = create.Run() - if err != nil { - return 1, errors.Wrapf(err, "error creating container for %v: %s", spec.Process.Args, runCollectOutput(errorFds, closeBeforeReadingErrorFds)) - } - defer func() { - err2 := del.Run() - if err2 != nil { - if err == nil { - err = errors.Wrapf(err2, "error deleting container") - } else { - logrus.Infof("error deleting container: %v", err2) - } - } - }() - - // Make sure we read the container's exit status when it exits. - pidValue, err := ioutil.ReadFile(pidFile) - if err != nil { - return 1, errors.Wrapf(err, "error reading pid from %q", pidFile) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) - if err != nil { - return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) - } - var reaping sync.WaitGroup - reaping.Add(1) - go func() { - defer reaping.Done() - var err error - _, err = unix.Wait4(pid, &wstatus, 0, nil) - if err != nil { - wstatus = 0 - logrus.Errorf("error waiting for container child process %d: %v\n", pid, err) - } - }() - - if configureNetwork { - teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args) - if teardown != nil { - defer teardown() - } - if err != nil { - return 1, err - } - } - - if copyPipes { - // We don't need the ends of the pipes that belong to the container. - stdin.Close() - if stdout != nil { - stdout.Close() - } - stderr.Close() - } - - // Handle stdio for the container in the background. - stdio.Add(1) - go runCopyStdio(&stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) - - // Start the container. 
- logrus.Debugf("Running %q", start.Args) - err = start.Run() - if err != nil { - return 1, errors.Wrapf(err, "error starting container") - } - stopped := false - defer func() { - if !stopped { - err2 := kill.Run() - if err2 != nil { - if err == nil { - err = errors.Wrapf(err2, "error stopping container") - } else { - logrus.Infof("error stopping container: %v", err2) - } - } - } - }() - - // Wait for the container to exit. - for { - now := time.Now() - var state specs.State - args = append(options.Args, "state", containerName) - stat := exec.Command(runtime, args...) - stat.Dir = bundlePath - stat.Stderr = os.Stderr - stateOutput, stateErr := stat.Output() - if stateErr != nil { - return 1, errors.Wrapf(stateErr, "error reading container state") - } - if err = json.Unmarshal(stateOutput, &state); err != nil { - return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput)) - } - switch state.Status { - case "running": - case "stopped": - stopped = true - default: - return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) - } - if stopped { - break - } - select { - case <-finishedCopy: - stopped = true - case <-time.After(time.Until(now.Add(100 * time.Millisecond))): - continue - } - if stopped { - break - } - } - - // Close the writing end of the stop-handling-stdio notification pipe. - unix.Close(finishCopy[1]) - // Wait for the stdio copy goroutine to flush. - stdio.Wait() - // Wait until we finish reading the exit status. - reaping.Wait() - - return wstatus, nil -} - -func runCollectOutput(fds, closeBeforeReadingFds []int) string { - for _, fd := range closeBeforeReadingFds { - unix.Close(fd) - } - var b bytes.Buffer - buf := make([]byte, 8192) - for _, fd := range fds { - nread, err := unix.Read(fd, buf) - if err != nil { - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - default: - logrus.Errorf("error reading from pipe %d: %v", fd, err) - case syscall.EINTR, syscall.EAGAIN: - } - } else { - logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) - } - continue - } - for nread > 0 { - r := buf[:nread] - if nwritten, err := b.Write(r); err != nil || nwritten != len(r) { - if nwritten != len(r) { - logrus.Errorf("error buffering data from pipe %d: %v", fd, err) - break - } - } - nread, err = unix.Read(fd, buf) - if err != nil { - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - default: - logrus.Errorf("error reading from pipe %d: %v", fd, err) - case syscall.EINTR, syscall.EAGAIN: - } - } else { - logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) - } - break - } - } - } - return b.String() -} -func setupRootlessNetwork(pid int) (teardown func(), err error) { - slirp4netns, err := exec.LookPath("slirp4netns") - if err != nil { - return nil, errors.Wrapf(err, "cannot find slirp4netns") - } - - rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe() - if err != nil { - return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe") - } - defer rootlessSlirpSyncR.Close() - - // Be sure there are no fds inherited to slirp4netns except the sync pipe - files, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return nil, errors.Wrapf(err, "cannot list open fds") - } - for _, f := range files { - fd, err := strconv.Atoi(f.Name()) - if err != nil { - return nil, errors.Wrapf(err, "cannot parse fd") - } - if fd == int(rootlessSlirpSyncW.Fd()) { - continue - } - unix.CloseOnExec(fd) - } - - cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", 
fmt.Sprintf("%d", pid), "tap0") - cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil - cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} - - err = cmd.Start() - rootlessSlirpSyncW.Close() - if err != nil { - return nil, errors.Wrapf(err, "cannot start slirp4netns") - } - - b := make([]byte, 1) - for { - if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil { - return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout") - } - if _, err := rootlessSlirpSyncR.Read(b); err == nil { - break - } else { - if os.IsTimeout(err) { - // Check if the process is still running. - var status syscall.WaitStatus - _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to read slirp4netns process status") - } - if status.Exited() || status.Signaled() { - return nil, errors.New("slirp4netns failed") - } - - continue - } - return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe") - } - } - - return func() { - cmd.Process.Kill() - cmd.Wait() - }, nil -} - -func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) { - var netconf, undo []*libcni.NetworkConfigList - - if isolation == IsolationOCIRootless { - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { - return setupRootlessNetwork(pid) - } - } - // Scan for CNI configuration files. - confdir := options.CNIConfigDir - files, err := libcni.ConfFiles(confdir, []string{".conf"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir) - } - lists, err := libcni.ConfFiles(confdir, []string{".conflist"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir) - } - logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...)) - // Read the CNI configuration files. 
- for _, file := range files { - nc, err := libcni.ConfFromFile(file) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) - } - if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { - if nc.Network.Name == "" { - logrus.Debugf("configuration in %q has no name, skipping it", file) - } else { - logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) - } - continue - } - cl, err := libcni.ConfListFromConf(nc) - if err != nil { - return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) - } - logrus.Debugf("using network configuration from %q", file) - netconf = append(netconf, cl) - } - for _, list := range lists { - cl, err := libcni.ConfListFromFile(list) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) - } - if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { - if cl.Name == "" { - logrus.Debugf("configuration list in %q has no name, skipping it", list) - } else { - logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) - } - continue - } - logrus.Debugf("using network configuration list from %q", list) - netconf = append(netconf, cl) - } - // Make sure we can access the container's network namespace, - // even after it exits, to successfully tear down the - // interfaces. Ensure this by opening a handle to the network - // namespace, and using our copy to both configure and - // deconfigure it. - netns := fmt.Sprintf("/proc/%d/ns/net", pid) - netFD, err := unix.Open(netns, unix.O_RDONLY, 0) - if err != nil { - return nil, errors.Wrapf(err, "error opening network namespace for %v", command) - } - mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - // Build our search path for the plugins. - pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) - cni := libcni.CNIConfig{Path: pluginPaths} - // Configure the interfaces. - rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) - teardown = func() { - for _, nc := range undo { - if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil { - logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) - } - } - unix.Close(netFD) - } - for i, nc := range netconf { - // Build the runtime config for use with this network configuration. - rtconf[nc] = &libcni.RuntimeConf{ - ContainerID: containerName, - NetNS: mynetns, - IfName: fmt.Sprintf("if%d", i), - Args: [][2]string{}, - CapabilityArgs: map[string]interface{}{}, - } - // Bring it up. - _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc]) - if err != nil { - return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) - } - // Add it to the list of networks to take down when the container process exits. - undo = append([]*libcni.NetworkConfigList{nc}, undo...) 
- } - return teardown, nil -} - -func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { - defer func() { - unix.Close(finishCopy[0]) - if copyPipes { - unix.Close(stdioPipe[unix.Stdin][1]) - unix.Close(stdioPipe[unix.Stdout][0]) - unix.Close(stdioPipe[unix.Stderr][0]) - } - stdio.Done() - finishedCopy <- struct{}{} - }() - // Map describing where data on an incoming descriptor should go. - relayMap := make(map[int]int) - // Map describing incoming and outgoing descriptors. - readDesc := make(map[int]string) - writeDesc := make(map[int]string) - // Buffers. - relayBuffer := make(map[int]*bytes.Buffer) - // Set up the terminal descriptor or pipes for polling. - if copyConsole { - // Accept a connection over our listening socket. - fd, err := runAcceptTerminal(consoleListener, spec.Process.ConsoleSize) - if err != nil { - logrus.Errorf("%v", err) - return - } - terminalFD := fd - // Input from our stdin, output from the terminal descriptor. - relayMap[unix.Stdin] = terminalFD - readDesc[unix.Stdin] = "stdin" - relayBuffer[terminalFD] = new(bytes.Buffer) - writeDesc[terminalFD] = "container terminal input" - relayMap[terminalFD] = unix.Stdout - readDesc[terminalFD] = "container terminal output" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "output" - // Set our terminal's mode to raw, to pass handling of special - // terminal input to the terminal in the container. - if terminal.IsTerminal(unix.Stdin) { - if state, err := terminal.MakeRaw(unix.Stdin); err != nil { - logrus.Warnf("error setting terminal state: %v", err) - } else { - defer func() { - if err = terminal.Restore(unix.Stdin, state); err != nil { - logrus.Errorf("unable to restore terminal state: %v", err) - } - }() - } - } - } - if copyPipes { - // Input from our stdin, output from the stdout and stderr pipes. - relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] - readDesc[unix.Stdin] = "stdin" - relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) - writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" - relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout - readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "stdout" - relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr - readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" - relayBuffer[unix.Stderr] = new(bytes.Buffer) - writeDesc[unix.Stderr] = "stderr" - } - // Set our reading descriptors to non-blocking. - for rfd, wfd := range relayMap { - if err := unix.SetNonblock(rfd, true); err != nil { - logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err) - return - } - if err := unix.SetNonblock(wfd, false); err != nil { - logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err) - } - } - // Pass data back and forth. - pollTimeout := -1 - for len(relayMap) > 0 { - // Start building the list of descriptors to poll. - pollFds := make([]unix.PollFd, 0, len(relayMap)+1) - // Poll for a notification that we should stop handling stdio. - pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) - // Poll on our reading descriptors. 
- for rfd := range relayMap { - pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) - } - buf := make([]byte, 8192) - // Wait for new data from any input descriptor, or a notification that we're done. - _, err := unix.Poll(pollFds, pollTimeout) - if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { - return - } - removes := make(map[int]struct{}) - for _, pollFd := range pollFds { - // If this descriptor's just been closed from the other end, mark it for - // removal from the set that we're checking for. - if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - removes[int(pollFd.Fd)] = struct{}{} - } - // If the descriptor was closed elsewhere, remove it from our list. - if pollFd.Revents&unix.POLLNVAL != 0 { - logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) - removes[int(pollFd.Fd)] = struct{}{} - } - // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. - if pollFd.Revents&unix.POLLIN == 0 { - // If we're using pipes and it's our stdin and it's closed, close the writing - // end of the corresponding pipe. - if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - continue - } - // Read whatever there is to be read. - readFD := int(pollFd.Fd) - writeFD, needToRelay := relayMap[readFD] - if needToRelay { - n, err := unix.Read(readFD, buf) - if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { - return - } - // If it's zero-length on our stdin and we're - // using pipes, it's an EOF, so close the stdin - // pipe's writing end. - if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - if n > 0 { - // Buffer the data in case we get blocked on where they need to go. - nwritten, err := relayBuffer[writeFD].Write(buf[:n]) - if err != nil { - logrus.Debugf("buffer: %v", err) - continue - } - if nwritten != n { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) - continue - } - // If this is the last of the data we'll be able to read from this - // descriptor, read all that there is to read. - for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - nr, err := unix.Read(readFD, buf) - util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) - if nr <= 0 { - break - } - nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) - if err != nil { - logrus.Debugf("buffer: %v", err) - break - } - if nwritten != nr { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) - break - } - } - } - } - } - // Try to drain the output buffers. Set the default timeout - // for the next poll() to 100ms if we still have data to write. - pollTimeout = -1 - for writeFD := range relayBuffer { - if relayBuffer[writeFD].Len() > 0 { - n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) - if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { - return - } - if n > 0 { - relayBuffer[writeFD].Next(n) - } - } - if relayBuffer[writeFD].Len() > 0 { - pollTimeout = 100 - } - } - // Remove any descriptors which we don't need to poll any more from the poll descriptor list. - for remove := range removes { - delete(relayMap, remove) - } - // If the we-can-return pipe had anything for us, we're done. 
- for _, pollFd := range pollFds { - if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { - // The pipe is closed, indicating that we can stop now. - return - } - } - } -} - -func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { - defer consoleListener.Close() - c, err := consoleListener.AcceptUnix() - if err != nil { - return -1, errors.Wrapf(err, "error accepting socket descriptor connection") - } - defer c.Close() - // Expect a control message over our new connection. - b := make([]byte, 8192) - oob := make([]byte, 8192) - n, oobn, _, _, err := c.ReadMsgUnix(b, oob) - if err != nil { - return -1, errors.Wrapf(err, "error reading socket descriptor") - } - if n > 0 { - logrus.Debugf("socket descriptor is for %q", string(b[:n])) - } - if oobn > len(oob) { - return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn) - } - // Parse the control message. - scm, err := unix.ParseSocketControlMessage(oob[:oobn]) - if err != nil { - return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message") - } - logrus.Debugf("control messages: %v", scm) - // Expect to get a descriptor. - terminalFD := -1 - for i := range scm { - fds, err := unix.ParseUnixRights(&scm[i]) - if err != nil { - return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i]) - } - logrus.Debugf("fds: %v", fds) - if len(fds) == 0 { - continue - } - terminalFD = fds[0] - break - } - if terminalFD == -1 { - return -1, errors.Errorf("unable to read terminal descriptor") - } - // Set the pseudoterminal's size to the configured size, or our own. - winsize := &unix.Winsize{} - if terminalSize != nil { - // Use configured sizes. - winsize.Row = uint16(terminalSize.Height) - winsize.Col = uint16(terminalSize.Width) - } else { - if terminal.IsTerminal(unix.Stdin) { - // Use the size of our terminal. - if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { - logrus.Warnf("error reading size of controlling terminal: %v", err) - winsize.Row = 0 - winsize.Col = 0 - } - } - } - if winsize.Row != 0 && winsize.Col != 0 { - if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { - logrus.Warnf("error setting size of container pseudoterminal: %v", err) - } - // FIXME - if we're connected to a terminal, we should - // be passing the updated terminal size down when we - // receive a SIGWINCH. - } - return terminalFD, nil -} - -// Create pipes to use for relaying stdio. 
-func runMakeStdioPipe(uid, gid int) ([][]int, error) { - stdioPipe := make([][]int, 3) - for i := range stdioPipe { - stdioPipe[i] = make([]int, 2) - if err := unix.Pipe(stdioPipe[i]); err != nil { - return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) - } - } - if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") - } - return stdioPipe, nil -} diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index a7519a092..8597e3656 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -3,9 +3,44 @@ package buildah import ( + "bytes" + "context" + "encoding/json" "fmt" - "golang.org/x/sys/unix" + "io" + "io/ioutil" + "net" "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containernetworking/cni/libcni" + "github.com/containers/buildah/bind" + "github.com/containers/buildah/chroot" + "github.com/containers/buildah/pkg/secrets" + "github.com/containers/buildah/pkg/unshare" + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/stringid" + "github.com/docker/go-units" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/sys/unix" ) func setChildProcess() error { @@ -15,3 +50,1988 @@ func setChildProcess() error { } return nil } + +// Run runs the specified command in the container's root filesystem. +func (b *Builder) Run(command []string, options RunOptions) error { + p, err := ioutil.TempDir("", Package) + if err != nil { + return errors.Wrapf(err, "run: error creating temporary directory under %q", os.TempDir()) + } + // On some hosts like AH, /tmp is a symlink and we need an + // absolute path. 
+ path, err := filepath.EvalSymlinks(p) + if err != nil { + return errors.Wrapf(err, "run: error evaluating %q for symbolic links", p) + } + logrus.Debugf("using %q to hold bundle data", path) + defer func() { + if err2 := os.RemoveAll(path); err2 != nil { + logrus.Errorf("error removing %q: %v", path, err2) + } + }() + + gp, err := generate.New("linux") + if err != nil { + return errors.Wrapf(err, "error generating new 'linux' runtime spec") + } + g := &gp + + isolation := options.Isolation + if isolation == IsolationDefault { + isolation = b.Isolation + if isolation == IsolationDefault { + isolation = IsolationOCI + } + } + if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil { + return err + } + + b.configureEnvironment(g, options) + + if b.CommonBuildOpts == nil { + return errors.Errorf("Invalid format on container you must recreate the container") + } + + if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { + return err + } + + if options.WorkingDir != "" { + g.SetProcessCwd(options.WorkingDir) + } else if b.WorkDir() != "" { + g.SetProcessCwd(b.WorkDir()) + } + setupSelinux(g, b.ProcessLabel, b.MountLabel) + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return errors.Wrapf(err, "error mounting container %q", b.ContainerID) + } + defer func() { + if err := b.Unmount(); err != nil { + logrus.Errorf("error unmounting container: %v", err) + } + }() + g.SetRootPath(mountPoint) + if len(command) > 0 { + command = runLookupPath(g, command) + g.SetProcessArgs(command) + } else { + g.SetProcessArgs(nil) + } + + setupMaskedPaths(g) + setupReadOnlyPaths(g) + + setupTerminal(g, options.Terminal, options.TerminalSize) + + configureNetwork, configureNetworks, err := b.configureNamespaces(g, options) + if err != nil { + return err + } + + if err := b.configureUIDGID(g, mountPoint, options); err != nil { + return err + } + + g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) + + // Now grab the spec from the generator. Set the generator to nil so that future contributors + // will quickly be able to tell that they're supposed to be modifying the spec directly from here. + spec := g.Config + g = nil + + logrus.Debugf("ensuring working directory %q exists", filepath.Join(mountPoint, spec.Process.Cwd)) + if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil { + return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd) + } + + // Set the seccomp configuration using the specified profile name. Some syscalls are + // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot), + // so we sorted out the capabilities lists first. + if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil { + return err + } + + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + bindFiles := make(map[string]string) + namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) 
+ volumes := b.Volumes() + + if !contains(volumes, "/etc/hosts") { + hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) + if err != nil { + return err + } + bindFiles["/etc/hosts"] = hostFile + } + + if !contains(volumes, "/etc/resolv.conf") { + resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions) + if err != nil { + return err + } + bindFiles["/etc/resolv.conf"] = resolvFile + } + + err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions) + if err != nil { + return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) + } + + if options.CNIConfigDir == "" { + options.CNIConfigDir = b.CNIConfigDir + if b.CNIConfigDir == "" { + options.CNIConfigDir = util.DefaultCNIConfigDir + } + } + if options.CNIPluginPath == "" { + options.CNIPluginPath = b.CNIPluginPath + if b.CNIPluginPath == "" { + options.CNIPluginPath = util.DefaultCNIPluginPath + } + } + + switch isolation { + case IsolationOCI: + var moreCreateArgs []string + if options.NoPivot { + moreCreateArgs = []string{"--no-pivot"} + } else { + moreCreateArgs = nil + } + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path)) + case IsolationChroot: + err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr) + case IsolationOCIRootless: + moreCreateArgs := []string{"--no-new-keyring"} + if options.NoPivot { + moreCreateArgs = append(moreCreateArgs, "--no-pivot") + } + if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil { + return err + } + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path)) + default: + err = errors.Errorf("don't know how to run this command") + } + return err +} + +func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { + // Resources - CPU + if commonOpts.CPUPeriod != 0 { + g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) + } + if commonOpts.CPUQuota != 0 { + g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota) + } + if commonOpts.CPUShares != 0 { + g.SetLinuxResourcesCPUShares(commonOpts.CPUShares) + } + if commonOpts.CPUSetCPUs != "" { + g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs) + } + if commonOpts.CPUSetMems != "" { + g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) + } + + // Resources - Memory + if commonOpts.Memory != 0 { + g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) + } + if commonOpts.MemorySwap != 0 { + g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) + } + + // cgroup membership + if commonOpts.CgroupParent != "" { + g.SetLinuxCgroupsPath(commonOpts.CgroupParent) + } + + // Other process resource limits + if err := addRlimits(commonOpts.Ulimit, g); err != nil { + return err + } + + logrus.Debugf("Resources: %#v", commonOpts) + return nil +} + +func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) { + var mounts []specs.Mount + hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID} + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have 
something there. + for _, volume := range builtinVolumes { + subdir := digest.Canonical.FromString(volume).Hex() + volumePath := filepath.Join(containerDir, "buildah-volumes", subdir) + srcPath := filepath.Join(mountPoint, volume) + initializeVolume := false + // If we need to, initialize the volume path's initial contents. + if _, err := os.Stat(volumePath); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume) + } + logrus.Debugf("setting up built-in volume at %q", volumePath) + if err = os.MkdirAll(volumePath, 0755); err != nil { + return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume) + } + if err = label.Relabel(volumePath, mountLabel, false); err != nil { + return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume) + } + initializeVolume = true + } + stat, err := os.Stat(srcPath) + if err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume) + } + if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil { + return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume) + } + if stat, err = os.Stat(srcPath); err != nil { + return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume) + } + } + if initializeVolume { + if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil { + return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume) + } + if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil { + return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume) + } + if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) { + return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) + } + } + // Add the bind mount. + mounts = append(mounts, specs.Mount{ + Source: volumePath, + Destination: volume, + Type: "bind", + Options: []string{"bind"}, + }) + } + return mounts, nil +} + +func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions NamespaceOptions) error { + // Start building a new list of mounts. + var mounts []specs.Mount + haveMount := func(destination string) bool { + for _, mount := range mounts { + if mount.Destination == destination { + // Already have something to mount there. + return true + } + } + return false + } + + ipc := namespaceOptions.Find(string(specs.IPCNamespace)) + hostIPC := ipc == nil || ipc.Host + net := namespaceOptions.Find(string(specs.NetworkNamespace)) + hostNetwork := net == nil || net.Host + user := namespaceOptions.Find(string(specs.UserNamespace)) + hostUser := user == nil || user.Host + + // Copy mounts from the generated list. + mountCgroups := true + specMounts := []specs.Mount{} + for _, specMount := range spec.Mounts { + // Override some of the mounts from the generated list if we're doing different things with namespaces. 
+ if specMount.Destination == "/dev/shm" { + specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize} + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/dev/shm is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/dev/shm", + Type: "bind", + Destination: "/dev/shm", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/dev/mqueue" { + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/dev/mqueue is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/dev/mqueue", + Type: "bind", + Destination: "/dev/mqueue", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/sys" { + if hostNetwork && !hostUser { + mountCgroups = false + if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/sys is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/sys", + Type: "bind", + Destination: "/sys", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, + } + } + } + specMounts = append(specMounts, specMount) + } + + // Add a mount for the cgroups filesystem, unless we're already + // recursively bind mounting all of /sys, in which case we shouldn't + // bother with it. + sysfsMount := []specs.Mount{} + if mountCgroups { + sysfsMount = []specs.Mount{{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, + }} + } + + // Get the list of files we need to bind into the container. + bindFileMounts, err := runSetupBoundFiles(bundlePath, bindFiles) + if err != nil { + return err + } + + // After this point we need to know the per-container persistent storage directory. + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) + } + + // Figure out which UID and GID to tell the secrets package to use + // for files that it creates. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + + // Get the list of secrets mounts. + secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless()) + + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + copyWithTar := b.copyWithTar(nil, nil) + builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID)) + if err != nil { + return err + } + + // Get the list of explicitly-specified volume mounts. + volumes, err := runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts) + if err != nil { + return err + } + + // Add them all, in the preferred order, except where they conflict with something that was previously added. + for _, mount := range append(append(append(append(append(volumes, builtins...), secretMounts...), bindFileMounts...), specMounts...), sysfsMount...) { + if haveMount(mount.Destination) { + // Already mounting something there, no need to bother with this one. + continue + } + // Add the mount. 
+ mounts = append(mounts, mount) + } + + // Set the list in the spec. + spec.Mounts = mounts + return nil +} + +// addNetworkConfig copies files from host and sets them up to bind mount into container +func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string) (string, error) { + stat, err := os.Stat(hostPath) + if err != nil { + return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID) + } + contents, err := ioutil.ReadFile(hostPath) + if err != nil { + return "", errors.Wrapf(err, "unable to read %s", hostPath) + } + + search := resolvconf.GetSearchDomains(contents) + nameservers := resolvconf.GetNameservers(contents, types.IP) + options := resolvconf.GetOptions(contents) + + if len(dnsSearch) > 0 { + search = dnsSearch + } + if len(dnsServers) != 0 { + dns, err := getDNSIP(dnsServers) + if err != nil { + return "", errors.Wrapf(err, "error getting dns servers") + } + nameservers = []string{} + for _, server := range dns { + nameservers = append(nameservers, server.String()) + } + } + + if len(dnsOptions) != 0 { + options = dnsOptions + } + + cfile := filepath.Join(rdir, filepath.Base(hostPath)) + if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil { + return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID) + } + + uid := int(stat.Sys().(*syscall.Stat_t).Uid) + gid := int(stat.Sys().(*syscall.Stat_t).Gid) + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID) + } + + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) + } + + return cfile, nil +} + +// generateHosts creates a containers hosts file +func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) { + hostPath := "/etc/hosts" + stat, err := os.Stat(hostPath) + if err != nil { + return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID) + } + + hosts := bytes.NewBufferString("# Generated by Buildah\n") + orig, err := ioutil.ReadFile(hostPath) + if err != nil { + return "", errors.Wrapf(err, "unable to read %s", hostPath) + } + hosts.Write(orig) + for _, host := range addHosts { + // verify the host format + values := strings.SplitN(host, ":", 2) + if len(values) != 2 { + return "", errors.Errorf("unable to parse host entry %q: incorrect format", host) + } + if values[0] == "" { + return "", errors.Errorf("hostname in host entry %q is empty", host) + } + if values[1] == "" { + return "", errors.Errorf("IP address in host entry %q is empty", host) + } + hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) + } + + if hostname != "" { + hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s\n", hostname))) + hosts.Write([]byte(fmt.Sprintf("::1 %s\n", hostname))) + } + cfile := filepath.Join(rdir, filepath.Base(hostPath)) + if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { + return "", errors.Wrapf(err, "error writing /etc/hosts into the container") + } + uid := int(stat.Sys().(*syscall.Stat_t).Uid) + gid := int(stat.Sys().(*syscall.Stat_t).Gid) + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return 
"", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID) + } + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) + } + + return cfile, nil +} + +func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { + switch terminalPolicy { + case DefaultTerminal: + onTerminal := terminal.IsTerminal(unix.Stdin) && terminal.IsTerminal(unix.Stdout) && terminal.IsTerminal(unix.Stderr) + if onTerminal { + logrus.Debugf("stdio is a terminal, defaulting to using a terminal") + } else { + logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") + } + g.SetProcessTerminal(onTerminal) + case WithTerminal: + g.SetProcessTerminal(true) + case WithoutTerminal: + g.SetProcessTerminal(false) + } + if terminalSize != nil { + g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) + } +} + +func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { + // Lock the caller to a single OS-level thread. + runtime.LockOSThread() + + // Set up bind mounts for things that a namespaced user might not be able to get to directly. + unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if unmountAll != nil { + defer func() { + if err := unmountAll(); err != nil { + logrus.Error(err) + } + }() + } + if err != nil { + return 1, err + } + + // Write the runtime configuration. + specbytes, err := json.Marshal(spec) + if err != nil { + return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec) + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return 1, errors.Wrapf(err, "error storing runtime configuration in %q", filepath.Join(bundlePath, "config.json")) + } + + logrus.Debugf("config = %v", string(specbytes)) + + // Decide which runtime to use. + runtime := options.Runtime + if runtime == "" { + runtime = util.Runtime() + } + + // Default to just passing down our stdio. + getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, os.Stdout, os.Stderr + } + + // Figure out how we're doing stdio handling, and create pipes and sockets. + var stdio sync.WaitGroup + var consoleListener *net.UnixListener + var errorFds, closeBeforeReadingErrorFds []int + stdioPipe := make([][]int, 3) + copyConsole := false + copyPipes := false + finishCopy := make([]int, 2) + if err = unix.Pipe(finishCopy); err != nil { + return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") + } + finishedCopy := make(chan struct{}) + if spec.Process != nil { + if spec.Process.Terminal { + copyConsole = true + // Create a listening socket for accepting the container's terminal's PTY master. + socketPath := filepath.Join(bundlePath, "console.sock") + consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) + if err != nil { + return 1, errors.Wrapf(err, "error creating socket %q to receive terminal descriptor", consoleListener.Addr()) + } + // Add console socket arguments. + moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) + } else { + copyPipes = true + // Figure out who should own the pipes. 
+ uid, gid, err := util.GetHostRootIDs(spec) + if err != nil { + return 1, err + } + // Create stdio pipes. + if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { + return 1, err + } + errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} + closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} + // Set stdio to our pipes. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") + stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") + stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") + return stdin, stdout, stderr + } + } + } else { + if options.Quiet { + // Discard stdout. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, nil, os.Stderr + } + } + } + + // Build the commands that we'll execute. + pidFile := filepath.Join(bundlePath, "pid") + args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + create := exec.Command(runtime, args...) + create.Dir = bundlePath + stdin, stdout, stderr := getCreateStdio() + create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr + if create.SysProcAttr == nil { + create.SysProcAttr = &syscall.SysProcAttr{} + } + + args = append(options.Args, "start", containerName) + start := exec.Command(runtime, args...) + start.Dir = bundlePath + start.Stderr = os.Stderr + + args = append(options.Args, "kill", containerName) + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + + args = append(options.Args, "delete", containerName) + del := exec.Command(runtime, args...) + del.Dir = bundlePath + del.Stderr = os.Stderr + + // Actually create the container. + logrus.Debugf("Running %q", create.Args) + err = create.Run() + if err != nil { + return 1, errors.Wrapf(err, "error creating container for %v: %s", spec.Process.Args, runCollectOutput(errorFds, closeBeforeReadingErrorFds)) + } + defer func() { + err2 := del.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error deleting container") + } else { + logrus.Infof("error deleting container: %v", err2) + } + } + }() + + // Make sure we read the container's exit status when it exits. + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return 1, errors.Wrapf(err, "error reading pid from %q", pidFile) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + var reaping sync.WaitGroup + reaping.Add(1) + go func() { + defer reaping.Done() + var err error + _, err = unix.Wait4(pid, &wstatus, 0, nil) + if err != nil { + wstatus = 0 + logrus.Errorf("error waiting for container child process %d: %v\n", pid, err) + } + }() + + if configureNetwork { + teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args) + if teardown != nil { + defer teardown() + } + if err != nil { + return 1, err + } + } + + if copyPipes { + // We don't need the ends of the pipes that belong to the container. + stdin.Close() + if stdout != nil { + stdout.Close() + } + stderr.Close() + } + + // Handle stdio for the container in the background. 
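The hunk above drives the OCI runtime binary through its create/start/kill/delete subcommands and reaps the container PID in a goroutine; container stdio is then handled by a background goroutine that reports completion through a WaitGroup and a channel. A minimal, self-contained sketch of that background-relay bookkeeping, using plain io.Copy rather than the poll-based relay that follows (all names here are illustrative and not part of the vendored code):

package main

import (
	"io"
	"os"
	"sync"
)

// relayInBackground copies data from src to dst on a goroutine and signals
// completion both through the WaitGroup and the done channel, mirroring the
// stdio/finishedCopy bookkeeping used by runUsingRuntime.
func relayInBackground(wg *sync.WaitGroup, dst io.Writer, src io.Reader, done chan<- struct{}) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(dst, src) // best-effort; a real relay would log the error
		done <- struct{}{}
	}()
}

func main() {
	var wg sync.WaitGroup
	done := make(chan struct{}, 1)
	pr, pw, _ := os.Pipe()
	relayInBackground(&wg, os.Stdout, pr, done)
	pw.Write([]byte("hello from the container side\n"))
	pw.Close() // closing the writer lets io.Copy return
	<-done
	wg.Wait()
}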
+ stdio.Add(1) + go runCopyStdio(&stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) + + // Start the container. + logrus.Debugf("Running %q", start.Args) + err = start.Run() + if err != nil { + return 1, errors.Wrapf(err, "error starting container") + } + stopped := false + defer func() { + if !stopped { + err2 := kill.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error stopping container") + } else { + logrus.Infof("error stopping container: %v", err2) + } + } + } + }() + + // Wait for the container to exit. + for { + now := time.Now() + var state specs.State + args = append(options.Args, "state", containerName) + stat := exec.Command(runtime, args...) + stat.Dir = bundlePath + stat.Stderr = os.Stderr + stateOutput, stateErr := stat.Output() + if stateErr != nil { + return 1, errors.Wrapf(stateErr, "error reading container state") + } + if err = json.Unmarshal(stateOutput, &state); err != nil { + return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput)) + } + switch state.Status { + case "running": + case "stopped": + stopped = true + default: + return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) + } + if stopped { + break + } + select { + case <-finishedCopy: + stopped = true + case <-time.After(time.Until(now.Add(100 * time.Millisecond))): + continue + } + if stopped { + break + } + } + + // Close the writing end of the stop-handling-stdio notification pipe. + unix.Close(finishCopy[1]) + // Wait for the stdio copy goroutine to flush. + stdio.Wait() + // Wait until we finish reading the exit status. + reaping.Wait() + + return wstatus, nil +} + +func runCollectOutput(fds, closeBeforeReadingFds []int) string { + for _, fd := range closeBeforeReadingFds { + unix.Close(fd) + } + var b bytes.Buffer + buf := make([]byte, 8192) + for _, fd := range fds { + nread, err := unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logrus.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + continue + } + for nread > 0 { + r := buf[:nread] + if nwritten, err := b.Write(r); err != nil || nwritten != len(r) { + if nwritten != len(r) { + logrus.Errorf("error buffering data from pipe %d: %v", fd, err) + break + } + } + nread, err = unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logrus.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + break + } + } + } + return b.String() +} + +func setupRootlessNetwork(pid int) (teardown func(), err error) { + slirp4netns, err := exec.LookPath("slirp4netns") + if err != nil { + return nil, errors.Wrapf(err, "cannot find slirp4netns") + } + + rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe") + } + defer rootlessSlirpSyncR.Close() + + // Be sure there are no fds inherited to slirp4netns except the sync pipe + files, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return nil, errors.Wrapf(err, "cannot list open fds") + } + for _, f := range files { + fd, err := strconv.Atoi(f.Name()) + if err != nil { + return nil, errors.Wrapf(err, "cannot parse fd") + } 
+ if fd == int(rootlessSlirpSyncW.Fd()) { + continue + } + unix.CloseOnExec(fd) + } + + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") + cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil + cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} + + err = cmd.Start() + rootlessSlirpSyncW.Close() + if err != nil { + return nil, errors.Wrapf(err, "cannot start slirp4netns") + } + + b := make([]byte, 1) + for { + if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil { + return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout") + } + if _, err := rootlessSlirpSyncR.Read(b); err == nil { + break + } else { + if os.IsTimeout(err) { + // Check if the process is still running. + var status syscall.WaitStatus + _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to read slirp4netns process status") + } + if status.Exited() || status.Signaled() { + return nil, errors.New("slirp4netns failed") + } + + continue + } + return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe") + } + } + + return func() { + cmd.Process.Kill() + cmd.Wait() + }, nil +} + +func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) { + var netconf, undo []*libcni.NetworkConfigList + + if isolation == IsolationOCIRootless { + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { + return setupRootlessNetwork(pid) + } + } + // Scan for CNI configuration files. + confdir := options.CNIConfigDir + files, err := libcni.ConfFiles(confdir, []string{".conf"}) + if err != nil { + return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir) + } + lists, err := libcni.ConfFiles(confdir, []string{".conflist"}) + if err != nil { + return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir) + } + logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...)) + // Read the CNI configuration files. 
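runConfigureNetwork scans options.CNIConfigDir for *.conf and *.conflist files and, as the loop below shows, normalizes both kinds into NetworkConfigList values before invoking the plugins. A compact sketch of that loading step using the same libcni calls (loadNetworkLists and confDir are illustrative names; the name filtering and error wrapping of the real code are reduced to the essentials):

package main

import (
	"fmt"

	"github.com/containernetworking/cni/libcni"
)

// loadNetworkLists gathers every CNI configuration under confDir, upgrading
// single *.conf files to configuration lists, the same normalization the
// loop below performs.
func loadNetworkLists(confDir string) ([]*libcni.NetworkConfigList, error) {
	var lists []*libcni.NetworkConfigList

	confs, err := libcni.ConfFiles(confDir, []string{".conf"})
	if err != nil {
		return nil, err
	}
	for _, file := range confs {
		conf, err := libcni.ConfFromFile(file)
		if err != nil {
			return nil, fmt.Errorf("loading %s: %v", file, err)
		}
		list, err := libcni.ConfListFromConf(conf)
		if err != nil {
			return nil, fmt.Errorf("converting %s: %v", file, err)
		}
		lists = append(lists, list)
	}

	confLists, err := libcni.ConfFiles(confDir, []string{".conflist"})
	if err != nil {
		return nil, err
	}
	for _, file := range confLists {
		list, err := libcni.ConfListFromFile(file)
		if err != nil {
			return nil, fmt.Errorf("loading %s: %v", file, err)
		}
		lists = append(lists, list)
	}
	return lists, nil
}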
+ for _, file := range files { + nc, err := libcni.ConfFromFile(file) + if err != nil { + return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) + } + if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { + if nc.Network.Name == "" { + logrus.Debugf("configuration in %q has no name, skipping it", file) + } else { + logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) + } + continue + } + cl, err := libcni.ConfListFromConf(nc) + if err != nil { + return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) + } + logrus.Debugf("using network configuration from %q", file) + netconf = append(netconf, cl) + } + for _, list := range lists { + cl, err := libcni.ConfListFromFile(list) + if err != nil { + return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) + } + if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { + if cl.Name == "" { + logrus.Debugf("configuration list in %q has no name, skipping it", list) + } else { + logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) + } + continue + } + logrus.Debugf("using network configuration list from %q", list) + netconf = append(netconf, cl) + } + // Make sure we can access the container's network namespace, + // even after it exits, to successfully tear down the + // interfaces. Ensure this by opening a handle to the network + // namespace, and using our copy to both configure and + // deconfigure it. + netns := fmt.Sprintf("/proc/%d/ns/net", pid) + netFD, err := unix.Open(netns, unix.O_RDONLY, 0) + if err != nil { + return nil, errors.Wrapf(err, "error opening network namespace for %v", command) + } + mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) + // Build our search path for the plugins. + pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) + cni := libcni.CNIConfig{Path: pluginPaths} + // Configure the interfaces. + rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) + teardown = func() { + for _, nc := range undo { + if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil { + logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) + } + } + unix.Close(netFD) + } + for i, nc := range netconf { + // Build the runtime config for use with this network configuration. + rtconf[nc] = &libcni.RuntimeConf{ + ContainerID: containerName, + NetNS: mynetns, + IfName: fmt.Sprintf("if%d", i), + Args: [][2]string{}, + CapabilityArgs: map[string]interface{}{}, + } + // Bring it up. + _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc]) + if err != nil { + return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) + } + // Add it to the list of networks to take down when the container process exits. + undo = append([]*libcni.NetworkConfigList{nc}, undo...) 
+ } + return teardown, nil +} + +func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { + defer func() { + unix.Close(finishCopy[0]) + if copyPipes { + unix.Close(stdioPipe[unix.Stdin][1]) + unix.Close(stdioPipe[unix.Stdout][0]) + unix.Close(stdioPipe[unix.Stderr][0]) + } + stdio.Done() + finishedCopy <- struct{}{} + }() + // Map describing where data on an incoming descriptor should go. + relayMap := make(map[int]int) + // Map describing incoming and outgoing descriptors. + readDesc := make(map[int]string) + writeDesc := make(map[int]string) + // Buffers. + relayBuffer := make(map[int]*bytes.Buffer) + // Set up the terminal descriptor or pipes for polling. + if copyConsole { + // Accept a connection over our listening socket. + fd, err := runAcceptTerminal(consoleListener, spec.Process.ConsoleSize) + if err != nil { + logrus.Errorf("%v", err) + return + } + terminalFD := fd + // Input from our stdin, output from the terminal descriptor. + relayMap[unix.Stdin] = terminalFD + readDesc[unix.Stdin] = "stdin" + relayBuffer[terminalFD] = new(bytes.Buffer) + writeDesc[terminalFD] = "container terminal input" + relayMap[terminalFD] = unix.Stdout + readDesc[terminalFD] = "container terminal output" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "output" + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. + if terminal.IsTerminal(unix.Stdin) { + if state, err := terminal.MakeRaw(unix.Stdin); err != nil { + logrus.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = terminal.Restore(unix.Stdin, state); err != nil { + logrus.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + } + if copyPipes { + // Input from our stdin, output from the stdout and stderr pipes. + relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] + readDesc[unix.Stdin] = "stdin" + relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) + writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" + relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout + readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "stdout" + relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr + readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" + relayBuffer[unix.Stderr] = new(bytes.Buffer) + writeDesc[unix.Stderr] = "stderr" + } + // Set our reading descriptors to non-blocking. + for rfd, wfd := range relayMap { + if err := unix.SetNonblock(rfd, true); err != nil { + logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err) + return + } + if err := unix.SetNonblock(wfd, false); err != nil { + logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err) + } + } + // Pass data back and forth. + pollTimeout := -1 + for len(relayMap) > 0 { + // Start building the list of descriptors to poll. + pollFds := make([]unix.PollFd, 0, len(relayMap)+1) + // Poll for a notification that we should stop handling stdio. + pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) + // Poll on our reading descriptors. 
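The relay below multiplexes all descriptors with unix.Poll, treating POLLIN as "data to copy" and POLLHUP as "the peer went away". A stripped-down sketch of the same poll loop for a single descriptor, relaying a pipe to stdout (Linux-only, like run_linux.go; echoUntilHUP is an illustrative name):

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// echoUntilHUP relays everything readable on rfd to stdout, using the same
// poll-for-POLLIN/POLLHUP approach as runCopyStdio.
func echoUntilHUP(rfd int) error {
	if err := unix.SetNonblock(rfd, true); err != nil {
		return err
	}
	buf := make([]byte, 8192)
	for {
		pfds := []unix.PollFd{{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}}
		if _, err := unix.Poll(pfds, -1); err != nil {
			if err == unix.EINTR {
				continue
			}
			return err
		}
		if pfds[0].Revents&unix.POLLIN != 0 {
			n, err := unix.Read(rfd, buf)
			if n > 0 {
				os.Stdout.Write(buf[:n])
			}
			if n == 0 || (err != nil && err != unix.EAGAIN) {
				return err // EOF or a hard read error
			}
			continue
		}
		if pfds[0].Revents&(unix.POLLHUP|unix.POLLERR|unix.POLLNVAL) != 0 {
			return nil // nothing left to read and the writer is gone
		}
	}
}

func main() {
	p := make([]int, 2)
	unix.Pipe(p)
	unix.Write(p[1], []byte("relayed through poll\n"))
	unix.Close(p[1])
	echoUntilHUP(p[0])
}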
+ for rfd := range relayMap { + pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) + } + buf := make([]byte, 8192) + // Wait for new data from any input descriptor, or a notification that we're done. + _, err := unix.Poll(pollFds, pollTimeout) + if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { + return + } + removes := make(map[int]struct{}) + for _, pollFd := range pollFds { + // If this descriptor's just been closed from the other end, mark it for + // removal from the set that we're checking for. + if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + removes[int(pollFd.Fd)] = struct{}{} + } + // If the descriptor was closed elsewhere, remove it from our list. + if pollFd.Revents&unix.POLLNVAL != 0 { + logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) + removes[int(pollFd.Fd)] = struct{}{} + } + // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. + if pollFd.Revents&unix.POLLIN == 0 { + // If we're using pipes and it's our stdin and it's closed, close the writing + // end of the corresponding pipe. + if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + continue + } + // Read whatever there is to be read. + readFD := int(pollFd.Fd) + writeFD, needToRelay := relayMap[readFD] + if needToRelay { + n, err := unix.Read(readFD, buf) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { + return + } + // If it's zero-length on our stdin and we're + // using pipes, it's an EOF, so close the stdin + // pipe's writing end. + if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + if n > 0 { + // Buffer the data in case we get blocked on where they need to go. + nwritten, err := relayBuffer[writeFD].Write(buf[:n]) + if err != nil { + logrus.Debugf("buffer: %v", err) + continue + } + if nwritten != n { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) + continue + } + // If this is the last of the data we'll be able to read from this + // descriptor, read all that there is to read. + for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + nr, err := unix.Read(readFD, buf) + util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) + if nr <= 0 { + break + } + nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) + if err != nil { + logrus.Debugf("buffer: %v", err) + break + } + if nwritten != nr { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) + break + } + } + } + } + } + // Try to drain the output buffers. Set the default timeout + // for the next poll() to 100ms if we still have data to write. + pollTimeout = -1 + for writeFD := range relayBuffer { + if relayBuffer[writeFD].Len() > 0 { + n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { + return + } + if n > 0 { + relayBuffer[writeFD].Next(n) + } + } + if relayBuffer[writeFD].Len() > 0 { + pollTimeout = 100 + } + } + // Remove any descriptors which we don't need to poll any more from the poll descriptor list. + for remove := range removes { + delete(relayMap, remove) + } + // If the we-can-return pipe had anything for us, we're done. 
+ for _, pollFd := range pollFds { + if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { + // The pipe is closed, indicating that we can stop now. + return + } + } + } +} + +func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { + defer consoleListener.Close() + c, err := consoleListener.AcceptUnix() + if err != nil { + return -1, errors.Wrapf(err, "error accepting socket descriptor connection") + } + defer c.Close() + // Expect a control message over our new connection. + b := make([]byte, 8192) + oob := make([]byte, 8192) + n, oobn, _, _, err := c.ReadMsgUnix(b, oob) + if err != nil { + return -1, errors.Wrapf(err, "error reading socket descriptor") + } + if n > 0 { + logrus.Debugf("socket descriptor is for %q", string(b[:n])) + } + if oobn > len(oob) { + return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn) + } + // Parse the control message. + scm, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message") + } + logrus.Debugf("control messages: %v", scm) + // Expect to get a descriptor. + terminalFD := -1 + for i := range scm { + fds, err := unix.ParseUnixRights(&scm[i]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i]) + } + logrus.Debugf("fds: %v", fds) + if len(fds) == 0 { + continue + } + terminalFD = fds[0] + break + } + if terminalFD == -1 { + return -1, errors.Errorf("unable to read terminal descriptor") + } + // Set the pseudoterminal's size to the configured size, or our own. + winsize := &unix.Winsize{} + if terminalSize != nil { + // Use configured sizes. + winsize.Row = uint16(terminalSize.Height) + winsize.Col = uint16(terminalSize.Width) + } else { + if terminal.IsTerminal(unix.Stdin) { + // Use the size of our terminal. + if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { + logrus.Warnf("error reading size of controlling terminal: %v", err) + winsize.Row = 0 + winsize.Col = 0 + } + } + } + if winsize.Row != 0 && winsize.Col != 0 { + if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { + logrus.Warnf("error setting size of container pseudoterminal: %v", err) + } + // FIXME - if we're connected to a terminal, we should + // be passing the updated terminal size down when we + // receive a SIGWINCH. + } + return terminalFD, nil +} + +// Create pipes to use for relaying stdio. +func runMakeStdioPipe(uid, gid int) ([][]int, error) { + stdioPipe := make([][]int, 3) + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err := unix.Pipe(stdioPipe[i]); err != nil { + return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) + } + } + if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") + } + return stdioPipe, nil +} + +func runUsingRuntimeMain() { + var options runUsingRuntimeSubprocOptions + // Set logging. 
+ if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + } + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + // Set ourselves up to read the container's exit status. We're doing this in a child process + // so that we won't mess with the setting in a caller of the library. This stubs to OS specific + // calls + if err := setChildProcess(); err != nil { + os.Exit(1) + } + // Run the container, start to finish. + status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName) + if err != nil { + fmt.Fprintf(os.Stderr, "error running container: %v\n", err) + os.Exit(1) + } + // Pass the container's exit status back to the caller by exiting with the same status. + if status.Exited() { + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) + os.Exit(1) + } + os.Exit(1) +} + +func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, idmapOptions IDMappingOptions, policy NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { + // Set namespace options in the container configuration. + configureUserns := false + specifiedNetwork := false + for _, namespaceOption := range namespaceOptions { + switch namespaceOption.Name { + case string(specs.UserNamespace): + configureUserns = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUserns = true + } + case string(specs.NetworkNamespace): + specifiedNetwork = true + configureNetwork = false + if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { + if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { + configureNetworks = strings.Split(namespaceOption.Path, ",") + namespaceOption.Path = "" + } + configureNetwork = (policy != NetworkDisabled) + } + case string(specs.UTSNamespace): + configureUTS = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUTS = true + } + } + if namespaceOption.Host { + if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) + } + } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { + if namespaceOption.Path == "" { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) + } + return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) + } + } + + // If we've got mappings, we're going to have to create a user namespace. 
+ if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { + if err := g.AddOrReplaceLinuxNamespace(specs.UserNamespace, ""); err != nil { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace)) + } + hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") + if err != nil { + return false, nil, false, err + } + for _, m := range idmapOptions.UIDMap { + g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) + } + if len(idmapOptions.UIDMap) == 0 { + for _, m := range hostUidmap { + g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size) + } + } + for _, m := range idmapOptions.GIDMap { + g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) + } + if len(idmapOptions.GIDMap) == 0 { + for _, m := range hostGidmap { + g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size) + } + } + if !specifiedNetwork { + if err := g.AddOrReplaceLinuxNamespace(specs.NetworkNamespace, ""); err != nil { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace)) + } + configureNetwork = (policy != NetworkDisabled) + } + } else { + if err := g.RemoveLinuxNamespace(specs.UserNamespace); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace)) + } + if !specifiedNetwork { + if err := g.RemoveLinuxNamespace(specs.NetworkNamespace); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace)) + } + } + } + if configureNetwork { + for name, val := range util.DefaultNetworkSysctl { + g.AddLinuxSysctl(name, val) + } + } + return configureNetwork, configureNetworks, configureUTS, nil +} + +func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) { + defaultNamespaceOptions, err := DefaultNamespaceOptions() + if err != nil { + return false, nil, err + } + + namespaceOptions := defaultNamespaceOptions + namespaceOptions.AddOrReplace(b.NamespaceOptions...) + namespaceOptions.AddOrReplace(options.NamespaceOptions...) 
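setupNamespaces above requests a user namespace and then installs UID/GID mappings on the generated spec through the runtime-tools generator. A small standalone sketch of that generator usage (the host ID 100000 and the mapping size are placeholder values, not taken from the patch):

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	// Start from the library's default linux spec, as the code above does.
	g, err := generate.New("linux")
	if err != nil {
		panic(err)
	}

	// Ask for a private user namespace and map one host ID range onto
	// container IDs 0..65535.
	if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
		panic(err)
	}
	g.AddLinuxUIDMapping(100000, 0, 65536)
	g.AddLinuxGIDMapping(100000, 0, 65536)

	spec := g.Config
	fmt.Printf("namespaces: %v\n", spec.Linux.Namespaces)
	fmt.Printf("uid mappings: %+v\n", spec.Linux.UIDMappings)
}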
+ + networkPolicy := options.ConfigureNetwork + if networkPolicy == NetworkDefault { + networkPolicy = b.ConfigureNetwork + } + + configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(g, namespaceOptions, b.IDMappingOptions, networkPolicy) + if err != nil { + return false, nil, err + } + + if configureUTS { + if options.Hostname != "" { + g.SetHostname(options.Hostname) + } else if b.Hostname() != "" { + g.SetHostname(b.Hostname()) + } else { + g.SetHostname(stringid.TruncateID(b.ContainerID)) + } + } else { + g.SetHostname("") + } + + found := false + spec := g.Config + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { + found = true + break + } + } + if !found { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) + } + + return configureNetwork, configureNetworks, nil +} + +func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount, err error) { + for dest, src := range bindFiles { + options := []string{"rbind"} + if strings.HasPrefix(src, bundlePath) { + options = append(options, bind.NoBindOption) + } + mounts = append(mounts, specs.Mount{ + Source: src, + Destination: dest, + Type: "bind", + Options: options, + }) + } + return mounts, nil +} + +func addRlimits(ulimit []string, g *generate.Generator) error { + var ( + ul *units.Ulimit + err error + ) + + for _, u := range ulimit { + if ul, err = units.ParseUlimit(u); err != nil { + return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) + } + + g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) + } + return nil +} + +func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount) ([]specs.Mount, error) { + var mounts []specs.Mount + + parseMount := func(host, container string, options []string) (specs.Mount, error) { + var foundrw, foundro, foundz, foundZ bool + var rootProp string + for _, opt := range options { + switch opt { + case "rw": + foundrw = true + case "ro": + foundro = true + case "z": + foundz = true + case "Z": + foundZ = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared": + rootProp = opt + } + } + if !foundrw && !foundro { + options = append(options, "rw") + } + if foundz { + if err := label.Relabel(host, mountLabel, true); err != nil { + return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) + } + } + if foundZ { + if err := label.Relabel(host, mountLabel, false); err != nil { + return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) + } + } + if rootProp == "" { + options = append(options, "private") + } + return specs.Mount{ + Destination: container, + Type: "bind", + Source: host, + Options: options, + }, nil + } + // Bind mount volumes specified for this particular Run() invocation + for _, i := range optionMounts { + logrus.Debugf("setting up mounted volume at %q", i.Destination) + mount, err := parseMount(i.Source, i.Destination, append(i.Options, "rbind")) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + // Bind mount volumes given by the user when the container was created + for _, i := range volumeMounts { + var options []string + spliti := strings.Split(i, ":") + if len(spliti) > 2 { + options = strings.Split(spliti[2], ",") + } + options = append(options, "rbind") + mount, err := parseMount(spliti[0], spliti[1], options) + if err != nil { + return nil, err + } + mounts = append(mounts, 
mount) + } + return mounts, nil +} + +func setupMaskedPaths(g *generate.Generator) { + for _, mp := range []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + } { + g.AddLinuxMaskedPaths(mp) + } +} + +func setupReadOnlyPaths(g *generate.Generator) { + for _, rp := range []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } { + g.AddLinuxReadonlyPaths(rp) + } +} + +func setupCapAdd(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.AddProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) + } + if err := g.AddProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the effective capability set", cap) + } + if err := g.AddProcessCapabilityInheritable(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) + } + if err := g.AddProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) + } + if err := g.AddProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) + } + } + return nil +} + +func setupCapDrop(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.DropProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) + } + if err := g.DropProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the effective capability set", cap) + } + if err := g.DropProcessCapabilityInheritable(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) + } + if err := g.DropProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) + } + if err := g.DropProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) + } + } + return nil +} + +func setupCapabilities(g *generate.Generator, firstAdds, firstDrops, secondAdds, secondDrops []string) error { + g.ClearProcessCapabilities() + if err := setupCapAdd(g, util.DefaultCapabilities...); err != nil { + return err + } + if err := setupCapAdd(g, firstAdds...); err != nil { + return err + } + if err := setupCapDrop(g, firstDrops...); err != nil { + return err + } + if err := setupCapAdd(g, secondAdds...); err != nil { + return err + } + return setupCapDrop(g, secondDrops...) +} + +// Search for a command that isn't given as an absolute path using the $PATH +// under the rootfs. We can't resolve absolute symbolic links without +// chroot()ing, which we may not be able to do, so just accept a link as a +// valid resolution. +func runLookupPath(g *generate.Generator, command []string) []string { + // Look for the configured $PATH. + spec := g.Config + envPath := "" + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "PATH=") { + envPath = spec.Process.Env[i] + } + } + // If there is no configured $PATH, supply one. 
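runLookupPath resolves a non-absolute command against the configured $PATH, but stats each candidate underneath the container rootfs and accepts symlinks as-is, since they cannot be resolved safely without chroot()ing. The same idea as a tiny standalone helper (lookPathInRoot is an illustrative name):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// lookPathInRoot resolves cmd against pathEnv, statting candidates under
// rootfs; an empty PATH element means the working directory, and a symlink
// or any executable bit counts as a match.
func lookPathInRoot(rootfs, pathEnv, cwd, cmd string) string {
	if filepath.IsAbs(cmd) {
		return cmd
	}
	for _, dir := range filepath.SplitList(pathEnv) {
		if dir == "" {
			dir = cwd
		}
		candidate := filepath.Join(dir, cmd)
		fi, err := os.Lstat(filepath.Join(rootfs, candidate))
		if err != nil || fi.IsDir() {
			continue
		}
		if fi.Mode()&os.ModeSymlink != 0 || fi.Mode()&0111 != 0 {
			return candidate
		}
	}
	return cmd
}

func main() {
	fmt.Println(lookPathInRoot("/", "/usr/local/bin:/usr/bin:/bin", "/", "sh"))
}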
+ if envPath == "" { + defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" + envPath = "PATH=" + defaultPath + g.AddProcessEnv("PATH", defaultPath) + } + // No command, nothing to do. + if len(command) == 0 { + return command + } + // Command is already an absolute path, use it as-is. + if filepath.IsAbs(command[0]) { + return command + } + // For each element in the PATH, + for _, pathEntry := range filepath.SplitList(envPath[5:]) { + // if it's the empty string, it's ".", which is the Cwd, + if pathEntry == "" { + pathEntry = spec.Process.Cwd + } + // build the absolute path which it might be, + candidate := filepath.Join(pathEntry, command[0]) + // check if it's there, + if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { + // and if it's not a directory, and either a symlink or executable, + if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { + // use that. + return append([]string{candidate}, command[1:]...) + } + } + } + return command +} + +func getDNSIP(dnsServers []string) (dns []net.IP, err error) { + for _, i := range dnsServers { + result := net.ParseIP(i) + if result == nil { + return dns, errors.Errorf("invalid IP address %s", i) + } + dns = append(dns, result) + } + return dns, nil +} + +func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) error { + // Set the user UID/GID/supplemental group list/capabilities lists. + user, err := b.user(mountPoint, options.User) + if err != nil { + return err + } + if err := setupCapabilities(g, b.AddCapabilities, b.DropCapabilities, options.AddCapabilities, options.DropCapabilities); err != nil { + return err + } + g.SetProcessUID(user.UID) + g.SetProcessGID(user.GID) + for _, gid := range user.AdditionalGids { + g.AddProcessAdditionalGid(gid) + } + + // Remove capabilities if not running as root except Bounding set + if user.UID != 0 { + bounding := g.Config.Process.Capabilities.Bounding + g.ClearProcessCapabilities() + g.Config.Process.Capabilities.Bounding = bounding + } + + return nil +} + +func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) { + g.ClearProcessEnv() + if b.CommonBuildOpts.HTTPProxy { + for _, envSpec := range []string{ + "http_proxy", + "HTTP_PROXY", + "https_proxy", + "HTTPS_PROXY", + "ftp_proxy", + "FTP_PROXY", + "no_proxy", + "NO_PROXY", + } { + envVal := os.Getenv(envSpec) + if envVal != "" { + g.AddProcessEnv(envSpec, envVal) + } + } + } + + for _, envSpec := range append(b.Env(), options.Env...) { + env := strings.SplitN(envSpec, "=", 2) + if len(env) > 1 { + g.AddProcessEnv(env[0], env[1]) + } + } + + for src, dest := range b.Args { + g.AddProcessEnv(src, dest) + } +} + +func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32) error { + spec.Hostname = "" + spec.Process.User.AdditionalGids = nil + spec.Linux.Resources = nil + + emptyDir := filepath.Join(bundleDir, "empty") + if err := os.Mkdir(emptyDir, 0); err != nil { + return errors.Wrapf(err, "error creating %q", emptyDir) + } + + // Replace /sys with a read-only bind mount. 
+ mounts := []specs.Mount{ + { + Source: "/dev", + Destination: "/dev", + Type: "tmpfs", + Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, + }, + { + Source: "mqueue", + Destination: "/dev/mqueue", + Type: "mqueue", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "pts", + Destination: "/dev/pts", + Type: "devpts", + Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, + }, + { + Source: "shm", + Destination: "/dev/shm", + Type: "tmpfs", + Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=65536k"}, + }, + { + Source: "/proc", + Destination: "/proc", + Type: "proc", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "/sys", + Destination: "/sys", + Type: "bind", + Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, + }, + } + // Cover up /sys/fs/cgroup and /sys/fs/selinux, if they exist in our source for /sys. + if _, err := os.Stat("/sys/fs/cgroup"); err == nil { + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") + } + if _, err := os.Stat("/sys/fs/selinux"); err == nil { + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux") + } + // Keep anything that isn't under /dev, /proc, or /sys. + for i := range spec.Mounts { + if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") || + spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") || + spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") { + continue + } + mounts = append(mounts, spec.Mounts[i]) + } + spec.Mounts = mounts + return nil +} + +func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { + var confwg sync.WaitGroup + config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ + Options: options, + Spec: spec, + RootPath: rootPath, + BundlePath: bundlePath, + ConfigureNetwork: configureNetwork, + ConfigureNetworks: configureNetworks, + MoreCreateArgs: moreCreateArgs, + ContainerName: containerName, + Isolation: isolation, + }) + if conferr != nil { + return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) + } + cmd := reexec.Command(runUsingRuntimeCommand) + cmd.Dir = bundlePath + cmd.Stdin = options.Stdin + if cmd.Stdin == nil { + cmd.Stdin = os.Stdin + } + cmd.Stdout = options.Stdout + if cmd.Stdout == nil { + cmd.Stdout = os.Stdout + } + cmd.Stderr = options.Stderr + if cmd.Stderr == nil { + cmd.Stderr = os.Stderr + } + cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())) + preader, pwriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating configuration pipe") + } + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + if conferr != nil { + conferr = errors.Wrapf(conferr, "error while copying configuration down pipe to child process") + } + confwg.Done() + }() + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
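runUsingRuntimeSubproc hands the marshalled options to the re-executed child over an extra pipe: preader is appended to cmd.ExtraFiles, so it appears as descriptor 3 in the child, where runUsingRuntimeMain opens it and JSON-decodes the configuration. A self-contained sketch of that parent/child handshake with the same reexec package ("example-child", childOptions, and the message are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

type childOptions struct {
	Message string
}

func childMain() {
	// ExtraFiles start at descriptor 3 in the child, matching the confPipe
	// handling in runUsingRuntimeMain.
	confPipe := os.NewFile(3, "confpipe")
	if confPipe == nil {
		os.Exit(1)
	}
	defer confPipe.Close()
	var opts childOptions
	if err := json.NewDecoder(confPipe).Decode(&opts); err != nil {
		fmt.Fprintf(os.Stderr, "decoding options: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("child received:", opts.Message)
}

func init() {
	reexec.Register("example-child", childMain)
}

func main() {
	if reexec.Init() {
		// We were re-executed as "example-child"; childMain already ran.
		return
	}
	cmd := reexec.Command("example-child")
	preader, pwriter, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd.ExtraFiles = []*os.File{preader}
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	preader.Close() // the child holds its own copy
	json.NewEncoder(pwriter).Encode(childOptions{Message: "hello over fd 3"})
	pwriter.Close()
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}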
+ defer preader.Close() + defer pwriter.Close() + err = cmd.Run() + if err != nil { + err = errors.Wrapf(err, "error while running runtime") + } + confwg.Wait() + if err == nil { + return conferr + } + if conferr != nil { + logrus.Debugf("%v", conferr) + } + return err +} + +func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error { + switch isolation { + case IsolationOCIRootless: + if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of an IPC namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)}) + _, err := exec.LookPath("slirp4netns") + hostNetworking := err != nil + networkNamespacePath := "" + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil { + hostNetworking = ns.Host + networkNamespacePath = ns.Path + if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) { + logrus.Debugf("Disabling network namespace configuration.") + networkNamespacePath = "" + } + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{ + Name: string(specs.NetworkNamespace), + Host: hostNetworking, + Path: networkNamespacePath, + }) + if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a PID namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) + if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a user namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UserNamespace)}) + if ns := options.NamespaceOptions.Find(string(specs.UTSNamespace)); ns != nil && !ns.Host { + logrus.Debugf("Disabling UTS namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UTSNamespace), Host: true}) + case IsolationOCI: + pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) + userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) + if (pidns == nil || pidns.Host) && (userns != nil && !userns.Host) { + return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") + } + } + return nil +} + +// DefaultNamespaceOptions returns the default namespace settings from the +// runtime-tools generator library. 
+func DefaultNamespaceOptions() (NamespaceOptions, error) { + options := NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: true}, + {Name: string(specs.IPCNamespace), Host: true}, + {Name: string(specs.MountNamespace), Host: true}, + {Name: string(specs.NetworkNamespace), Host: true}, + {Name: string(specs.PIDNamespace), Host: true}, + {Name: string(specs.UserNamespace), Host: true}, + {Name: string(specs.UTSNamespace), Host: true}, + } + g, err := generate.New("linux") + if err != nil { + return options, errors.Wrapf(err, "error generating new 'linux' runtime spec") + } + spec := g.Config + if spec.Linux != nil { + for _, ns := range spec.Linux.Namespaces { + options.AddOrReplace(NamespaceOption{ + Name: string(ns.Type), + Path: ns.Path, + }) + } + } + return options, nil +} + +func contains(volumes []string, v string) bool { + for _, i := range volumes { + if i == v { + return true + } + } + return false +} + +type runUsingRuntimeSubprocOptions struct { + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ConfigureNetwork bool + ConfigureNetworks []string + MoreCreateArgs []string + ContainerName string + Isolation Isolation +} + +func init() { + reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) +} diff --git a/vendor/github.com/containers/buildah/run_unsupport.go b/vendor/github.com/containers/buildah/run_unsupport.go deleted file mode 100644 index 4824a0c4e..000000000 --- a/vendor/github.com/containers/buildah/run_unsupport.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux - -package buildah - -import ( - "github.com/pkg/errors" -) - -func setChildProcess() error { - return errors.New("function not supported on non-linux systems") -} diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go new file mode 100644 index 000000000..d8ff46cf6 --- /dev/null +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package buildah + +import ( + "github.com/pkg/errors" +) + +func setChildProcess() error { + return errors.New("function not supported on non-linux systems") +} + +func runUsingRuntimeMain() {} + +func (b *Builder) Run(command []string, options RunOptions) error { + return errors.New("function not supported on non-linux systems") +} +func DefaultNamespaceOptions() (NamespaceOptions, error) { + return NamespaceOptions{}, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/varlink/go/varlink/bridge_windows.go b/vendor/github.com/varlink/go/varlink/bridge_windows.go index 220ae3156..751224ec8 100644 --- a/vendor/github.com/varlink/go/varlink/bridge_windows.go +++ b/vendor/github.com/varlink/go/varlink/bridge_windows.go @@ -44,8 +44,8 @@ func NewBridge(bridge string) (*Connection, error) { } c.conn = PipeCon{nil, cmd, &r, &w} c.address = "" - c.reader = bufio.NewReader(r) - c.writer = bufio.NewWriter(w) + c.Reader = bufio.NewReader(r) + c.Writer = bufio.NewWriter(w) err = cmd.Start() if err != nil { diff --git a/vendor/github.com/varlink/go/varlink/call.go b/vendor/github.com/varlink/go/varlink/call.go index d6e046f1d..0eaf24aca 100644 --- a/vendor/github.com/varlink/go/varlink/call.go +++ b/vendor/github.com/varlink/go/varlink/call.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "net" "strings" ) @@ -14,36 +15,38 @@ import ( type Call struct { *bufio.Reader *bufio.Writer - in *serviceCall + Conn *net.Conn + Request *[]byte + In *serviceCall Continues bool Upgrade bool } // 
WantsMore indicates if the calling client accepts more than one reply to this method call. func (c *Call) WantsMore() bool { - return c.in.More + return c.In.More } // WantsUpgrade indicates that the calling client wants the connection to be upgraded. func (c *Call) WantsUpgrade() bool { - return c.in.Upgrade + return c.In.Upgrade } // IsOneway indicate that the calling client does not expect a reply. func (c *Call) IsOneway() bool { - return c.in.Oneway + return c.In.Oneway } // GetParameters retrieves the method call parameters. func (c *Call) GetParameters(p interface{}) error { - if c.in.Parameters == nil { + if c.In.Parameters == nil { return fmt.Errorf("empty parameters") } - return json.Unmarshal(*c.in.Parameters, p) + return json.Unmarshal(*c.In.Parameters, p) } func (c *Call) sendMessage(r *serviceReply) error { - if c.in.Oneway { + if c.In.Oneway { return nil } @@ -75,7 +78,7 @@ func (c *Call) Reply(parameters interface{}) error { }) } - if !c.in.More { + if !c.In.More { return fmt.Errorf("call did not set more, it does not expect continues") } diff --git a/vendor/github.com/varlink/go/varlink/service.go b/vendor/github.com/varlink/go/varlink/service.go index abccffe6a..bf13aa1de 100644 --- a/vendor/github.com/varlink/go/varlink/service.go +++ b/vendor/github.com/varlink/go/varlink/service.go @@ -74,7 +74,7 @@ func (s *Service) getInterfaceDescription(c Call, name string) error { return c.replyGetInterfaceDescription(description) } -func (s *Service) handleMessage(reader *bufio.Reader, writer *bufio.Writer, request []byte) error { +func (s *Service) HandleMessage(conn *net.Conn, reader *bufio.Reader, writer *bufio.Writer, request []byte) error { var in serviceCall err := json.Unmarshal(request, &in) @@ -84,9 +84,11 @@ func (s *Service) handleMessage(reader *bufio.Reader, writer *bufio.Writer, requ } c := Call{ - Reader: reader, - Writer: writer, - in: &in, + Conn: conn, + Reader: reader, + Writer: writer, + In: &in, + Request: &request, } r := strings.LastIndex(in.Method, ".") @@ -131,7 +133,7 @@ func (s *Service) handleConnection(conn net.Conn, wg *sync.WaitGroup) { break } - err = s.handleMessage(reader, writer, request[:len(request)-1]) + err = s.HandleMessage(&conn, reader, writer, request[:len(request)-1]) if err != nil { // FIXME: report error //fmt.Fprintf(os.Stderr, "handleMessage: %v", err) @@ -179,25 +181,36 @@ func (s *Service) parseAddress(address string) error { return nil } -func getListener(protocol string, address string) (net.Listener, error) { +func (s *Service) GetListener() (*net.Listener, error) { + s.mutex.Lock() + l := s.listener + s.mutex.Unlock() + return &l, nil +} + +func (s *Service) setListener() error { l := activationListener() if l == nil { - if protocol == "unix" && address[0] != '@' { - os.Remove(address) + if s.protocol == "unix" && s.address[0] != '@' { + os.Remove(s.address) } var err error - l, err = net.Listen(protocol, address) + l, err = net.Listen(s.protocol, s.address) if err != nil { - return nil, err + return err } - if protocol == "unix" && address[0] != '@' { + if s.protocol == "unix" && s.address[0] != '@' { l.(*net.UnixListener).SetUnlinkOnClose(true) } } - return l, nil + s.mutex.Lock() + s.listener = l + s.mutex.Unlock() + + return nil } func (s *Service) refreshTimeout(timeout time.Duration) error { @@ -216,26 +229,84 @@ func (s *Service) refreshTimeout(timeout time.Duration) error { } // Listen starts a Service. 
-func (s *Service) Listen(address string, timeout time.Duration) error { - var wg sync.WaitGroup - defer func() { s.teardown(); wg.Wait() }() - +func (s *Service) Bind(address string) error { s.mutex.Lock() if s.running { s.mutex.Unlock() - return fmt.Errorf("Listen(): already running") + return fmt.Errorf("Init(): already running") } s.mutex.Unlock() s.parseAddress(address) - l, err := getListener(s.protocol, s.address) + err := s.setListener() + if err != nil { + return err + } + return nil +} + +// Listen starts a Service. +func (s *Service) Listen(address string, timeout time.Duration) error { + var wg sync.WaitGroup + defer func() { s.teardown(); wg.Wait() }() + + err := s.Bind(address) if err != nil { return err } s.mutex.Lock() - s.listener = l + s.running = true + l := s.listener + s.mutex.Unlock() + + for s.running { + if timeout != 0 { + if err := s.refreshTimeout(timeout); err != nil { + return err + } + } + conn, err := l.Accept() + if err != nil { + if err.(net.Error).Timeout() { + s.mutex.Lock() + if s.conncounter == 0 { + s.mutex.Unlock() + return ServiceTimeoutError{} + } + s.mutex.Unlock() + continue + } + if !s.running { + return nil + } + return err + } + s.mutex.Lock() + s.conncounter++ + s.mutex.Unlock() + wg.Add(1) + go s.handleConnection(conn, &wg) + } + + return nil +} + +// Listen starts a Service. +func (s *Service) DoListen(timeout time.Duration) error { + var wg sync.WaitGroup + defer func() { s.teardown(); wg.Wait() }() + + s.mutex.Lock() + l := s.listener + s.mutex.Unlock() + + if l == nil { + return fmt.Errorf("No listener set") + } + + s.mutex.Lock() s.running = true s.mutex.Unlock() diff --git a/vendor/github.com/varlink/go/varlink/varlink_test.go b/vendor/github.com/varlink/go/varlink/varlink_test.go index 9dd4ddc63..9e6d0a1f4 100644 --- a/vendor/github.com/varlink/go/varlink/varlink_test.go +++ b/vendor/github.com/varlink/go/varlink/varlink_test.go @@ -31,7 +31,7 @@ func TestService(t *testing.T) { r := bufio.NewReader(&br) var b bytes.Buffer w := bufio.NewWriter(&b) - if err := service.handleMessage(r, w, []byte{0}); err == nil { + if err := service.HandleMessage(nil, r, w, []byte{0}); err == nil { t.Fatal("HandleMessage returned non-error") } }) @@ -42,7 +42,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"foo.GetInterfaceDescription" fdgdfg}`) - if err := service.handleMessage(r, w, msg); err == nil { + if err := service.HandleMessage(nil, r, w, msg); err == nil { t.Fatal("HandleMessage returned no error on invalid json") } }) @@ -53,7 +53,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"foo.GetInterfaceDescription"}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatal("HandleMessage returned error on wrong interface") } expect(t, `{"parameters":{"interface":"foo"},"error":"org.varlink.service.InterfaceNotFound"}`+"\000", @@ -66,7 +66,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"InvalidMethod"}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatal("HandleMessage returned error on invalid method") } expect(t, `{"parameters":{"parameter":"method"},"error":"org.varlink.service.InvalidParameter"}`+"\000", @@ -79,7 +79,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := 
[]byte(`{"method":"org.varlink.service.WrongMethod"}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatal("HandleMessage returned error on wrong method") } expect(t, `{"parameters":{"method":"WrongMethod"},"error":"org.varlink.service.MethodNotFound"}`+"\000", @@ -92,7 +92,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters": null}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"parameter":"parameters"},"error":"org.varlink.service.InvalidParameter"}`+"\000", @@ -105,7 +105,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{}}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000", @@ -118,7 +118,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"foo"}}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000", @@ -131,7 +131,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"org.varlink.service"}}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"description":"# The Varlink Service Interface is provided by every varlink service. 
It\n# describes the service and the interfaces it implements.\ninterface org.varlink.service\n\n# Get a list of all the interfaces a service provides and information\n# about the implementation.\nmethod GetInfo() -\u003e (\n vendor: string,\n product: string,\n version: string,\n url: string,\n interfaces: []string\n)\n\n# Get the description of an interface that is implemented by this service.\nmethod GetInterfaceDescription(interface: string) -\u003e (description: string)\n\n# The requested interface was not found.\nerror InterfaceNotFound (interface: string)\n\n# The requested method was not found\nerror MethodNotFound (method: string)\n\n# The interface defines the requested method, but the service does not\n# implement it.\nerror MethodNotImplemented (method: string)\n\n# One of the passed parameters is invalid.\nerror InvalidParameter (parameter: string)"}}`+"\000", @@ -144,7 +144,7 @@ func TestService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.varlink.service.GetInfo"}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"vendor":"Varlink","product":"Varlink Test","version":"1","url":"https://github.com/varlink/go/varlink","interfaces":["org.varlink.service"]}}`+"\000", @@ -224,7 +224,7 @@ func TestMoreService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.example.test.Pingf"}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"parameters":{"method":"Pingf"},"error":"org.varlink.service.MethodNotImplemented"}`+"\000", @@ -237,7 +237,7 @@ func TestMoreService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.example.test.PingError", "more" : true}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"error":"org.example.test.PingError"}`+"\000", @@ -249,7 +249,7 @@ func TestMoreService(t *testing.T) { var b bytes.Buffer w := bufio.NewWriter(&b) msg := []byte(`{"method":"org.example.test.Ping", "more" : true}`) - if err := service.handleMessage(r, w, msg); err != nil { + if err := service.HandleMessage(nil, r, w, msg); err != nil { t.Fatalf("HandleMessage returned error: %v", err) } expect(t, `{"continues":true}`+"\000"+`{"continues":true}`+"\000"+`{}`+"\000", -- cgit v1.2.3-54-g00ecf
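The varlink changes in this patch split Service.Listen into Bind (resolve the address and create the listener) plus DoListen (run the accept loop), and export GetListener and HandleMessage so callers can drive the service themselves. A usage sketch of the new entry points, assuming the service strings and the abstract socket address are placeholders and that errors are simply logged:

package main

import (
	"log"
	"time"

	"github.com/varlink/go/varlink"
)

func main() {
	service, err := varlink.NewService(
		"Example Vendor",
		"Example Product",
		"1",
		"https://example.invalid/",
	)
	if err != nil {
		log.Fatal(err)
	}
	// A real service would also register its interfaces before serving.

	// Bind() parses the address and creates the listener up front;
	// DoListen() then runs the accept loop, so the caller can hold on to
	// the bound socket (see GetListener) before serving.
	if err := service.Bind("unix:@exampleservice"); err != nil {
		log.Fatal(err)
	}
	if err := service.DoListen(30 * time.Second); err != nil {
		if _, isTimeout := err.(varlink.ServiceTimeoutError); isTimeout {
			log.Print("no connections before the idle timeout expired")
			return
		}
		log.Fatal(err)
	}
}

Separating Bind from DoListen, and exporting HandleMessage alongside the listener, lets a caller obtain the bound socket or feed individual requests into the service loop itself, which is what the remote-client plumbing in this commit builds on.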