-rw-r--r--  cmd/podman/cliconfig/config.go                            4
-rw-r--r--  cmd/podman/main_remote.go                                 6
-rw-r--r--  cmd/podman/shared/intermediate_varlink.go                 3
-rw-r--r--  cmd/podman/varlink.go                                    34
-rw-r--r--  docs/podman-create.1.md                                   2
-rw-r--r--  docs/podman-run.1.md                                      2
-rw-r--r--  docs/podman-varlink.1.md                                 19
-rw-r--r--  libpod/runtime_pod_linux.go                             126
-rw-r--r--  pkg/adapter/client.go                                    35
-rw-r--r--  pkg/adapter/client_config.go                              4
-rw-r--r--  pkg/adapter/containers.go                                 7
-rw-r--r--  pkg/adapter/runtime_remote.go                            13
-rw-r--r--  pkg/varlinkapi/attach.go                                  1
-rw-r--r--  pkg/varlinkapi/system.go                                 17
-rw-r--r--  vendor.conf                                               2
-rw-r--r--  vendor/github.com/varlink/go/varlink/bridge.go           11
-rw-r--r--  vendor/github.com/varlink/go/varlink/bridge_windows.go   11
17 files changed, 206 insertions(+), 91 deletions(-)
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index b770aaca0..aaa4513d8 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -32,6 +32,10 @@ type MainFlags struct {
CpuProfile string
LogLevel string
TmpDir string
+
+ RemoteUserName string
+ RemoteHost string
+ VarlinkAddress string
}
type AttachValues struct {
diff --git a/cmd/podman/main_remote.go b/cmd/podman/main_remote.go
index 753730b56..c8bb3ad3e 100644
--- a/cmd/podman/main_remote.go
+++ b/cmd/podman/main_remote.go
@@ -9,7 +9,11 @@ import (
const remote = true
func init() {
- // remote client specific flags can go here.
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.RemoteUserName, "username", "", "username on the remote host")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.RemoteHost, "remote-host", "", "remote host")
+ // TODO: maybe we should allow altering this for bridge connections?
+ //rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.VarlinkAddress, "varlink-address", adapter.DefaultAddress, "address of the varlink socket")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.LogLevel, "log-level", "error", "Log messages above specified level: debug, info, warn, error, fatal or panic")
}
func setSyslog() error {
diff --git a/cmd/podman/shared/intermediate_varlink.go b/cmd/podman/shared/intermediate_varlink.go
index 5e21245e3..4742d4909 100644
--- a/cmd/podman/shared/intermediate_varlink.go
+++ b/cmd/podman/shared/intermediate_varlink.go
@@ -3,8 +3,6 @@
package shared
import (
- "fmt"
-
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/pkg/rootless"
@@ -209,7 +207,6 @@ func boolFromVarlink(v *bool, flagName string, defaultValue bool) CRBool {
cr.Val = defaultValue
cr.Changed = false
} else {
- fmt.Println(flagName, cr.Val)
cr.Val = *v
cr.Changed = true
}
diff --git a/cmd/podman/varlink.go b/cmd/podman/varlink.go
index 978678a84..215542d2c 100644
--- a/cmd/podman/varlink.go
+++ b/cmd/podman/varlink.go
@@ -3,11 +3,17 @@
package main
import (
+ "fmt"
+ "os"
+ "path/filepath"
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/containers/libpod/version"
"github.com/pkg/errors"
@@ -45,13 +51,31 @@ func init() {
}
func varlinkCmd(c *cliconfig.VarlinkValues) error {
- args := c.InputArgs
- if len(args) < 1 {
- return errors.Errorf("you must provide a varlink URI")
+ varlinkURI := adapter.DefaultAddress
+ if rootless.IsRootless() {
+ xdg, err := util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+ socketDir := filepath.Join(xdg, "podman/io.podman")
+ if _, err := os.Stat(filepath.Dir(socketDir)); os.IsNotExist(err) {
+ if err := os.Mkdir(filepath.Dir(socketDir), 0755); err != nil {
+ return err
+ }
+ }
+ varlinkURI = fmt.Sprintf("unix:%s", socketDir)
}
+ args := c.InputArgs
+
if len(args) > 1 {
- return errors.Errorf("too many arguments. Requires exactly 1")
+ return errors.Errorf("too many arguments. you may optionally provide 1")
+ }
+
+ if len(args) > 0 {
+ varlinkURI = args[0]
}
+
+ logrus.Debugf("Using varlink socket: %s", varlinkURI)
timeout := time.Duration(c.Timeout) * time.Millisecond
// Create a single runtime for varlink
@@ -81,7 +105,7 @@ func varlinkCmd(c *cliconfig.VarlinkValues) error {
}
// Run the varlink server at the given address
- if err = service.Listen(args[0], timeout); err != nil {
+ if err = service.Listen(varlinkURI, timeout); err != nil {
switch err.(type) {
case varlink.ServiceTimeoutError:
logrus.Infof("varlink service expired (use --timeout to increase session time beyond %d ms, 0 means never timeout)", c.Int64("timeout"))
diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md
index da5750209..cf36106e8 100644
--- a/docs/podman-create.1.md
+++ b/docs/podman-create.1.md
@@ -592,7 +592,7 @@ Automatically remove the container when it exits. The default is *false*.
Note that the container will not be removed when it could not be created or
started successfully. This allows the user to inspect the container after
-failure. The `--rm` flag is incompatible with the `-d` flag.
+failure.
**--rootfs**
diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md
index 00b1a70c6..1840e0f0b 100644
--- a/docs/podman-run.1.md
+++ b/docs/podman-run.1.md
@@ -614,7 +614,7 @@ Automatically remove the container when it exits. The default is *false*.
Note that the container will not be removed when it could not be created or
started successfully. This allows the user to inspect the container after
-failure. The `--rm` flag is incompatible with the `-d` flag.
+failure.
**--rootfs**
diff --git a/docs/podman-varlink.1.md b/docs/podman-varlink.1.md
index 7eeb3dd66..0501d853f 100644
--- a/docs/podman-varlink.1.md
+++ b/docs/podman-varlink.1.md
@@ -7,8 +7,10 @@ podman\-varlink - Runs the varlink backend interface
**podman varlink** [*options*] *uri*
## DESCRIPTION
-Starts the varlink service listening on *uri* that allows varlink clients to interact with podman. This should generally be done
-with systemd. See _Configuration_ below.
+Starts the varlink service listening on *uri* that allows varlink clients to interact with podman. If no *uri* is provided, a default
+URI will be used depending on the user calling the varlink service. The default for the root user is `unix:/run/podman/io.podman`. Regular
+users will have a default *uri* of `unix:$XDG_RUNTIME_DIR/podman/io.podman`, for example `unix:/run/user/1000/podman/io.podman`.
+The varlink service should generally be managed by systemd. See _Configuration_ below.
## GLOBAL OPTIONS
@@ -23,16 +25,23 @@ second. A value of `0` means no timeout and the session will not expire.
## EXAMPLES
-Run the podman varlink service manually and accept the default timeout.
+Run the podman varlink service accepting all default options.
```
-$ podman varlink unix:/run/podman/io.podman
+$ podman varlink
+```
+
+
+Run the podman varlink service with an alternate URI and accept the default timeout.
+
+```
+$ podman varlink unix:/tmp/io.podman
```
Run the podman varlink service manually with a 5 second timeout.
```
-$ podman varlink --timeout 5000 unix:/run/podman/io.podman
+$ podman varlink --timeout 5000
```
## CONFIGURATION
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 0011c771a..5867b1f87 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -149,10 +149,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
// Go through and lock all containers so we can operate on them all at once
- dependencies := make(map[string][]string)
for _, ctr := range ctrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
+ ctrLock := ctr.lock
+ ctrLock.Lock()
+ defer ctrLock.Unlock()
// Sync all containers
if err := ctr.syncContainer(); err != nil {
@@ -177,23 +177,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if len(ctr.state.ExecSessions) != 0 && !force {
return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s which has active exec sessions", p.ID(), ctr.ID())
}
-
- deps, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return err
- }
- dependencies[ctr.ID()] = deps
}
- // Check if containers have dependencies
- // If they do, and the dependencies are not in the pod, error
- for ctr, deps := range dependencies {
- for _, dep := range deps {
- if _, ok := dependencies[dep]; !ok {
- return errors.Wrapf(ErrCtrExists, "container %s depends on container %s not in pod %s", ctr, dep, p.ID())
- }
- }
- }
+ // We maintain the invariant that container dependencies must all exist
+ // within the container's pod.
+ // No need to check dependencies as such - we're removing all containers
+ // in the pod at once, no dependency issues.
// First loop through all containers and stop them
// Do not remove in this loop to ensure that we don't remove unless all
@@ -220,18 +209,40 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
}
- // Start removing containers
- // We can remove containers even if they have dependencies now
- // As we have guaranteed their dependencies are in the pod
+ // Remove all containers in the pod from the state.
+ if err := r.state.RemovePodContainers(p); err != nil {
+ return err
+ }
+
+ var removalErr error
+
+ // Clean up after our removed containers.
+ // Errors here are nonfatal - the containers have already been evicted.
+ // We'll do our best to clean up after them, but we have to keep going
+ // and remove the pod as well.
+ // From here until we remove the pod from the state, no error returns.
for _, ctr := range ctrs {
+ // The container no longer exists in the state, mark invalid.
+ ctr.valid = false
+
+ ctr.newContainerEvent(events.Remove)
+
// Clean up network namespace, cgroups, mounts
if err := ctr.cleanup(ctx); err != nil {
- return err
+ if removalErr == nil {
+ removalErr = err
+ } else {
+ logrus.Errorf("Unable to clean up container %s: %v", ctr.ID(), err)
+ }
}
// Stop container's storage
if err := ctr.teardownStorage(); err != nil {
- return err
+ if removalErr == nil {
+ removalErr = err
+ } else {
+ logrus.Errorf("Unable to tear down container %s storage: %v", ctr.ID(), err)
+ }
}
// Delete the container from runtime (only if we are not
@@ -239,26 +250,24 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if ctr.state.State != ContainerStateConfigured &&
ctr.state.State != ContainerStateExited {
if err := ctr.delete(ctx); err != nil {
- return err
+ if removalErr == nil {
+ removalErr = err
+ } else {
+ logrus.Errorf("Unable to remove container %s from OCI runtime: %v", ctr.ID(), err)
+ }
}
}
// Free the container's lock
if err := ctr.lock.Free(); err != nil {
- return err
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error freeing container %s lock", ctr.ID())
+ } else {
+ logrus.Errorf("Unable to free container %s lock: %v", ctr.ID(), err)
+ }
}
}
- // Remove containers from the state
- if err := r.state.RemovePodContainers(p); err != nil {
- return err
- }
-
- // Mark containers invalid
- for _, ctr := range ctrs {
- ctr.valid = false
- }
-
// Remove pod cgroup, if present
if p.state.CgroupPath != "" {
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
@@ -266,10 +275,11 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
switch p.runtime.config.CgroupManager {
case SystemdCgroupsManager:
if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
- // The pod is already almost gone.
- // No point in hard-failing if we fail
- // this bit of cleanup.
- logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ } else {
+ logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ }
}
case CgroupfsCgroupsManager:
// Delete the cgroupfs cgroup
@@ -280,34 +290,60 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
if err != nil && err != cgroups.ErrCgroupDeleted {
- return err
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
+ } else {
+ logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
+ }
}
if err == nil {
if err := conmonCgroup.Delete(); err != nil {
- logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
+ } else {
+ logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
+ }
}
}
cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath))
if err != nil && err != cgroups.ErrCgroupDeleted {
- return err
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
+ } else {
+ logrus.Errorf("Error retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ }
}
if err == nil {
if err := cgroup.Delete(); err != nil {
- logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ } else {
+ logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ }
}
}
default:
- return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
+ // This should be caught much earlier, but let's still
+ // keep going so we make sure to evict the pod before
+ // ending up with an inconsistent state.
+ if removalErr == nil {
+ removalErr = errors.Wrapf(ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID())
+ } else {
+ logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID())
+ }
}
}
// Remove pod from state
if err := r.state.RemovePod(p); err != nil {
+ if removalErr != nil {
+ logrus.Errorf("%v", removalErr)
+ }
return err
}
// Mark pod invalid
p.valid = false
p.newPodEvent(events.Remove)
- return nil
+ return removalErr
}
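The removePod rewrite above follows a first-error-wins pattern: once the containers have been evicted from the state there is no way back, so the first cleanup failure is saved in removalErr and returned at the end, while any later failures are only logged. A self-contained sketch of the same pattern, using a hypothetical cleanupAll helper:

```go
package main

import (
	"fmt"
	"log"
)

// cleanupAll records the first failure and keeps going, logging any
// further failures, so every step still gets a chance to run.
func cleanupAll(steps []func() error) error {
	var removalErr error
	for i, step := range steps {
		if err := step(); err != nil {
			if removalErr == nil {
				removalErr = err
			} else {
				log.Printf("additional cleanup error in step %d: %v", i, err)
			}
		}
	}
	return removalErr
}

func main() {
	err := cleanupAll([]func() error{
		func() error { return nil },
		func() error { return fmt.Errorf("network teardown failed") },
		func() error { return fmt.Errorf("storage teardown failed") }, // logged, not returned
	})
	fmt.Println("first error:", err) // first error: network teardown failed
}
```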
diff --git a/pkg/adapter/client.go b/pkg/adapter/client.go
index 6512a5952..f672a92a6 100644
--- a/pkg/adapter/client.go
+++ b/pkg/adapter/client.go
@@ -3,30 +3,45 @@
package adapter
import (
+ "fmt"
"os"
- "github.com/sirupsen/logrus"
+ "github.com/pkg/errors"
"github.com/varlink/go/varlink"
)
-// DefaultAddress is the default address of the varlink socket
-const DefaultAddress = "unix:/run/podman/io.podman"
+type VarlinkConnectionInfo struct {
+ RemoteUserName string
+ RemoteHost string
+ VarlinkAddress string
+}
// Connect provides a varlink connection
func (r RemoteRuntime) Connect() (*varlink.Connection, error) {
- var err error
- var connection *varlink.Connection
- if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
- logrus.Infof("Connecting with varlink bridge")
- logrus.Debugf("%s", bridge)
+ var (
+ err error
+ connection *varlink.Connection
+ )
+
+ logLevel := r.cmd.LogLevel
+
+ // I'm leaving this here for now to document the bridge format. It can be removed later once the bridge
+ // function is more fleshed out.
+ //bridge := `ssh -T root@192.168.122.1 "/usr/bin/varlink -A '/usr/bin/podman varlink \$VARLINK_ADDRESS' bridge"`
+ if len(r.cmd.RemoteHost) > 0 {
+ // The user has provided a remote host endpoint
+ if len(r.cmd.RemoteUserName) < 1 {
+ return nil, errors.New("you must provide a username when providing a remote host name")
+ }
+ bridge := fmt.Sprintf(`ssh -T %s@%s /usr/bin/varlink -A \'/usr/bin/podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`, r.cmd.RemoteUserName, r.cmd.RemoteHost, logLevel)
+ connection, err = varlink.NewBridge(bridge)
+ } else if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
connection, err = varlink.NewBridge(bridge)
} else {
address := os.Getenv("PODMAN_VARLINK_ADDRESS")
if address == "" {
address = DefaultAddress
}
- logrus.Infof("Connecting with varlink address")
- logrus.Debugf("%s", address)
connection, err = varlink.NewConnection(address)
}
if err != nil {
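The ssh bridge string built in Connect has to escape $VARLINK_ADDRESS so that it survives both the local and the remote shell and arrives unexpanded at the remote varlink binary. A small sketch isolating that construction; bridgeCommand is a hypothetical helper and the user/host values are purely illustrative:

```go
package main

import "fmt"

// bridgeCommand assembles the ssh bridge string the same way Connect
// does. The backslash escaping keeps $VARLINK_ADDRESS literal until it
// reaches the remote /usr/bin/varlink process.
func bridgeCommand(user, host, logLevel string) string {
	return fmt.Sprintf(
		`ssh -T %s@%s /usr/bin/varlink -A \'/usr/bin/podman --log-level=%s varlink \\\$VARLINK_ADDRESS\' bridge`,
		user, host, logLevel)
}

func main() {
	fmt.Println(bridgeCommand("root", "192.168.122.1", "error"))
}
```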
diff --git a/pkg/adapter/client_config.go b/pkg/adapter/client_config.go
new file mode 100644
index 000000000..d165ef1cc
--- /dev/null
+++ b/pkg/adapter/client_config.go
@@ -0,0 +1,4 @@
+package adapter
+
+// DefaultAddress is the default address of the varlink socket
+const DefaultAddress = "unix:/run/podman/io.podman"
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 82d999202..ff7b6377a 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -413,7 +413,9 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
if c.IsSet("rm") {
- r.Runtime.RemoveContainer(ctx, ctr, false, true)
+ if err := r.Runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
+ logrus.Errorf("Error removing container %s: %v", ctr.ID(), err)
+ }
}
return exitCode, nil
@@ -965,8 +967,9 @@ func (r *LocalRuntime) CleanupContainers(ctx context.Context, cli *cliconfig.Cle
return ok, failures, nil
}
+// Only used when cleaning up containers
func removeContainer(ctx context.Context, ctr *libpod.Container, runtime *LocalRuntime) error {
- if err := runtime.RemoveContainer(ctx, ctr, false, true); err != nil {
+ if err := runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
return errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
}
return nil
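removeContainer now surfaces the removal failure instead of discarding it, wrapping it with context via errors.Wrapf from github.com/pkg/errors, as used throughout this patch. A tiny sketch of that wrapping; removeByID and cleanupAndRemove are hypothetical stand-ins:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func removeByID(id string) error {
	return fmt.Errorf("container is in use")
}

// cleanupAndRemove wraps the underlying failure so callers can see
// which container the error belongs to.
func cleanupAndRemove(id string) error {
	if err := removeByID(id); err != nil {
		return errors.Wrapf(err, "failed to cleanup and remove container %v", id)
	}
	return nil
}

func main() {
	fmt.Println(cleanupAndRemove("abc123"))
	// failed to cleanup and remove container abc123: container is in use
}
```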
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 34c3b2a6c..e86287462 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -39,6 +39,7 @@ type RemoteImageRuntime struct{}
type RemoteRuntime struct {
Conn *varlink.Connection
Remote bool
+ cmd cliconfig.MainFlags
}
// LocalRuntime describes a typical libpod runtime
@@ -48,17 +49,17 @@ type LocalRuntime struct {
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
- runtime := RemoteRuntime{}
+ runtime := RemoteRuntime{
+ Remote: true,
+ cmd: c.GlobalFlags,
+ }
conn, err := runtime.Connect()
if err != nil {
return nil, err
}
-
+ runtime.Conn = conn
return &LocalRuntime{
- &RemoteRuntime{
- Conn: conn,
- Remote: true,
- },
+ &runtime,
}, nil
}
diff --git a/pkg/varlinkapi/attach.go b/pkg/varlinkapi/attach.go
index 6c62d3514..2234899a5 100644
--- a/pkg/varlinkapi/attach.go
+++ b/pkg/varlinkapi/attach.go
@@ -60,6 +60,7 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
if !start && state != libpod.ContainerStateRunning {
return call.ReplyErrorOccurred("container must be running to attach")
}
+ call.Reply(nil)
reader, writer, _, pw, streams := setupStreams(call)
go func() {
diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go
index 7f436a954..59bfec75b 100644
--- a/pkg/varlinkapi/system.go
+++ b/pkg/varlinkapi/system.go
@@ -86,15 +86,18 @@ func (i *LibpodAPI) GetInfo(call iopodman.VarlinkCall) error {
Graph_status: graphStatus,
}
- registriesInterface := info[2].Data["registries"]
- insecureRegistriesInterface := info[3].Data["registries"]
- if registriesInterface != nil {
- registries = registriesInterface.([]string)
+ if len(info) > 2 {
+ registriesInterface := info[2].Data["registries"]
+ if registriesInterface != nil {
+ registries = registriesInterface.([]string)
+ }
}
- if insecureRegistriesInterface != nil {
- insecureRegistries = insecureRegistriesInterface.([]string)
+ if len(info) > 3 {
+ insecureRegistriesInterface := info[3].Data["registries"]
+ if insecureRegistriesInterface != nil {
+ insecureRegistries = insecureRegistriesInterface.([]string)
+ }
}
-
podmanInfo.Store = infoStore
podmanInfo.Podman = pmaninfo
podmanInfo.Registries = registries
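The GetInfo fix guards both the slice length and the map entry before using the registry data, so a short info payload from the backend can no longer panic the API. A sketch of the same defensive pattern, simplified to plain maps and using the comma-ok assertion form (a slight hardening over the plain assertion in the original, which would still panic on an unexpected type):

```go
package main

import "fmt"

// stringsAt returns info[idx][key] as a []string, or nil if the slice
// is too short, the entry is missing, or the type does not match.
func stringsAt(info []map[string]interface{}, idx int, key string) []string {
	if len(info) <= idx {
		return nil
	}
	v, ok := info[idx][key].([]string)
	if !ok {
		return nil
	}
	return v
}

func main() {
	info := []map[string]interface{}{
		{}, {}, {"registries": []string{"docker.io", "quay.io"}},
	}
	fmt.Println(stringsAt(info, 2, "registries")) // [docker.io quay.io]
	fmt.Println(stringsAt(info, 3, "registries")) // []: short slice guarded
}
```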
diff --git a/vendor.conf b/vendor.conf
index d5e2b60bd..2f7e36d85 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -93,8 +93,8 @@ k8s.io/api kubernetes-1.10.13-beta.0 https://github.com/kubernetes/api
k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apimachinery
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
-github.com/varlink/go 64e07fabffa33e385817b41971cf2674f692f391
github.com/containers/buildah v1.8.2
+github.com/varlink/go 0f1d566d194b9d6d48e0d47c5e4d822628919066
# TODO: Gotty has not been updated since 2012. Can we find replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/fsouza/go-dockerclient v1.3.0
diff --git a/vendor/github.com/varlink/go/varlink/bridge.go b/vendor/github.com/varlink/go/varlink/bridge.go
index 0ea5de682..c478dcd88 100644
--- a/vendor/github.com/varlink/go/varlink/bridge.go
+++ b/vendor/github.com/varlink/go/varlink/bridge.go
@@ -6,6 +6,7 @@ import (
"bufio"
"io"
"net"
+ "os"
"os/exec"
)
@@ -30,12 +31,13 @@ func (p PipeCon) Close() error {
return nil
}
-// NewConnection returns a new connection to the given address.
-func NewBridge(bridge string) (*Connection, error) {
+// NewBridgeWithStderr returns a new connection with the given bridge.
+func NewBridgeWithStderr(bridge string, stderr io.Writer) (*Connection, error) {
//var err error
c := Connection{}
cmd := exec.Command("sh", "-c", bridge)
+ cmd.Stderr = stderr
r, err := cmd.StdoutPipe()
if err != nil {
return nil, err
@@ -56,3 +58,8 @@ func NewBridge(bridge string) (*Connection, error) {
return &c, nil
}
+
+// NewBridge returns a new connection with the given bridge.
+func NewBridge(bridge string) (*Connection, error) {
+ return NewBridgeWithStderr(bridge, os.Stderr)
+}
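With this change NewBridge simply delegates to the new NewBridgeWithStderr, so callers that want to capture the bridge process's stderr (rather than inherit os.Stderr) can do so. A usage sketch, with `cat` standing in for a real bridge command:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/varlink/go/varlink"
)

func main() {
	// Collect the bridge's stderr into a buffer instead of letting it
	// write to the terminal.
	var stderr bytes.Buffer
	conn, err := varlink.NewBridgeWithStderr("cat", &stderr)
	if err != nil {
		fmt.Println("bridge error:", err)
		return
	}
	defer conn.Close()
	fmt.Println("bridge established; stderr so far:", stderr.String())
}
```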
diff --git a/vendor/github.com/varlink/go/varlink/bridge_windows.go b/vendor/github.com/varlink/go/varlink/bridge_windows.go
index 751224ec8..42953b871 100644
--- a/vendor/github.com/varlink/go/varlink/bridge_windows.go
+++ b/vendor/github.com/varlink/go/varlink/bridge_windows.go
@@ -4,6 +4,7 @@ import (
"bufio"
"io"
"net"
+ "os"
"os/exec"
)
@@ -28,12 +29,13 @@ func (p PipeCon) Close() error {
return nil
}
-// NewConnection returns a new connection to the given address.
-func NewBridge(bridge string) (*Connection, error) {
+// NewBridgeWithStderr returns a new connection with the given bridge.
+func NewBridgeWithStderr(bridge string, stderr io.Writer) (*Connection, error) {
//var err error
c := Connection{}
cmd := exec.Command("cmd", "/C", bridge)
+ cmd.Stderr = stderr
r, err := cmd.StdoutPipe()
if err != nil {
return nil, err
@@ -54,3 +56,8 @@ func NewBridge(bridge string) (*Connection, error) {
return &c, nil
}
+
+// NewBridge returns a new connection with the given bridge.
+func NewBridge(bridge string) (*Connection, error) {
+ return NewBridgeWithStderr(bridge, os.Stderr)
+}
\ No newline at end of file