summaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go3
-rw-r--r--vendor/github.com/containers/common/pkg/config/default.go10
-rw-r--r--vendor/github.com/containers/common/pkg/ssh/connection_golang.go326
-rw-r--r--vendor/github.com/containers/common/pkg/ssh/connection_native.go182
-rw-r--r--vendor/github.com/containers/common/pkg/ssh/ssh.go59
-rw-r--r--vendor/github.com/containers/common/pkg/ssh/types.go223
-rw-r--r--vendor/github.com/containers/common/pkg/ssh/utils.go198
-rw-r--r--vendor/github.com/dtylman/scp/.gitignore25
-rw-r--r--vendor/github.com/dtylman/scp/LICENSE21
-rw-r--r--vendor/github.com/dtylman/scp/README.md42
-rw-r--r--vendor/github.com/dtylman/scp/msg.go121
-rw-r--r--vendor/github.com/dtylman/scp/scp.go153
-rw-r--r--vendor/github.com/kr/fs/LICENSE27
-rw-r--r--vendor/github.com/kr/fs/Readme3
-rw-r--r--vendor/github.com/kr/fs/filesystem.go36
-rw-r--r--vendor/github.com/kr/fs/go.mod1
-rw-r--r--vendor/github.com/kr/fs/walk.go95
-rw-r--r--vendor/github.com/pkg/sftp/.gitignore10
-rw-r--r--vendor/github.com/pkg/sftp/CONTRIBUTORS3
-rw-r--r--vendor/github.com/pkg/sftp/LICENSE9
-rw-r--r--vendor/github.com/pkg/sftp/Makefile27
-rw-r--r--vendor/github.com/pkg/sftp/README.md44
-rw-r--r--vendor/github.com/pkg/sftp/allocator.go96
-rw-r--r--vendor/github.com/pkg/sftp/attrs.go90
-rw-r--r--vendor/github.com/pkg/sftp/attrs_stubs.go11
-rw-r--r--vendor/github.com/pkg/sftp/attrs_unix.go16
-rw-r--r--vendor/github.com/pkg/sftp/client.go1977
-rw-r--r--vendor/github.com/pkg/sftp/conn.go189
-rw-r--r--vendor/github.com/pkg/sftp/debug.go9
-rw-r--r--vendor/github.com/pkg/sftp/fuzz.go22
-rw-r--r--vendor/github.com/pkg/sftp/go.mod10
-rw-r--r--vendor/github.com/pkg/sftp/go.sum25
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go325
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go293
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go142
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go46
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go54
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go147
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go124
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go249
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go99
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go89
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go323
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go368
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go114
-rw-r--r--vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go243
-rw-r--r--vendor/github.com/pkg/sftp/ls_formatting.go81
-rw-r--r--vendor/github.com/pkg/sftp/ls_plan9.go21
-rw-r--r--vendor/github.com/pkg/sftp/ls_stub.go11
-rw-r--r--vendor/github.com/pkg/sftp/ls_unix.go23
-rw-r--r--vendor/github.com/pkg/sftp/match.go137
-rw-r--r--vendor/github.com/pkg/sftp/packet-manager.go216
-rw-r--r--vendor/github.com/pkg/sftp/packet-typing.go135
-rw-r--r--vendor/github.com/pkg/sftp/packet.go1276
-rw-r--r--vendor/github.com/pkg/sftp/pool.go79
-rw-r--r--vendor/github.com/pkg/sftp/release.go5
-rw-r--r--vendor/github.com/pkg/sftp/request-attrs.go63
-rw-r--r--vendor/github.com/pkg/sftp/request-errors.go54
-rw-r--r--vendor/github.com/pkg/sftp/request-example.go666
-rw-r--r--vendor/github.com/pkg/sftp/request-interfaces.go123
-rw-r--r--vendor/github.com/pkg/sftp/request-plan9.go34
-rw-r--r--vendor/github.com/pkg/sftp/request-readme.md53
-rw-r--r--vendor/github.com/pkg/sftp/request-server.go328
-rw-r--r--vendor/github.com/pkg/sftp/request-unix.go27
-rw-r--r--vendor/github.com/pkg/sftp/request.go630
-rw-r--r--vendor/github.com/pkg/sftp/request_windows.go44
-rw-r--r--vendor/github.com/pkg/sftp/server.go616
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_darwin.go21
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_impl.go29
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_linux.go22
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_plan9.go13
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_stubs.go15
-rw-r--r--vendor/github.com/pkg/sftp/sftp.go258
-rw-r--r--vendor/github.com/pkg/sftp/stat_plan9.go103
-rw-r--r--vendor/github.com/pkg/sftp/stat_posix.go124
-rw-r--r--vendor/github.com/pkg/sftp/syscall_fixed.go9
-rw-r--r--vendor/github.com/pkg/sftp/syscall_good.go8
-rw-r--r--vendor/golang.org/x/crypto/curve25519/curve25519.go9
-rw-r--r--vendor/golang.org/x/crypto/ssh/agent/client.go29
-rw-r--r--vendor/golang.org/x/crypto/ssh/certs.go2
-rw-r--r--vendor/modules.txt14
81 files changed, 11584 insertions, 373 deletions
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 3d90268cd..a6276fbef 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -375,6 +375,9 @@ type EngineConfig struct {
// ServiceDestinations mapped by service Names
ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"`
+ // SSHConfig contains the ssh config file path if not the default
+ SSHConfig string `toml:"ssh_config,omitempty"`
+
// RuntimePath is the path to OCI runtime binary for launching containers.
// The first path pointing to a valid file will be used This is used only
// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 161a9c8d6..c7ddf90ee 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -287,6 +287,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
storeOpts.GraphRoot = _defaultGraphRoot
}
+
c.graphRoot = storeOpts.GraphRoot
c.ImageCopyTmpDir = getDefaultTmpDir()
c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod")
@@ -397,6 +398,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.ChownCopiedFiles = true
c.PodExitPolicy = defaultPodExitPolicy
+ c.SSHConfig = getDefaultSSHConfig()
return c, nil
}
@@ -633,3 +635,11 @@ func machineVolumes(volumes []string) ([]string, error) {
}
return translatedVolumes, nil
}
+
+func getDefaultSSHConfig() string {
+ if path, ok := os.LookupEnv("CONTAINERS_SSH_CONF"); ok {
+ return path
+ }
+ dirname := homedir.Get()
+ return filepath.Join(dirname, ".ssh", "config")
+}
diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go
new file mode 100644
index 000000000..b6680b3d8
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go
@@ -0,0 +1,326 @@
+package ssh
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "os/user"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/pkg/sftp"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+ "golang.org/x/crypto/ssh/knownhosts"
+)
+
+func golangConnectionCreate(options ConnectionCreateOptions) error {
+ var match bool
+ var err error
+ if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil {
+ return fmt.Errorf("invalid destination: %w", err)
+ }
+
+ if !match {
+ options.Path = "ssh://" + options.Path
+ }
+
+ if len(options.Socket) > 0 {
+ options.Path += options.Socket
+ }
+
+ dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity)
+ if err != nil {
+ return err
+ }
+
+ if uri.Path == "" || uri.Path == "/" {
+ if uri.Path, err = getUDS(uri, options.Identity); err != nil {
+ return err
+ }
+ dst.URI += uri.Path
+ }
+
+ cfg, err := config.ReadCustomConfig()
+ if err != nil {
+ return err
+ }
+ if cfg.Engine.ServiceDestinations == nil {
+ cfg.Engine.ServiceDestinations = map[string]config.Destination{
+ options.Name: *dst,
+ }
+ cfg.Engine.ActiveService = options.Name
+ } else {
+ cfg.Engine.ServiceDestinations[options.Name] = *dst
+ }
+ return cfg.Write()
+}
+
+func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, error) {
+ _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+ cfg, err := ValidateAndConfigure(uri, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+
+ dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect: %w", err)
+ }
+
+ return &ConnectionDialReport{dial}, nil
+}
+
+func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) {
+ _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg, err := ValidateAndConfigure(uri, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+ dialAdd, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect: %w", err)
+ }
+
+ out, err := ExecRemoteCommand(dialAdd, strings.Join(options.Args, " "))
+ if err != nil {
+ return nil, err
+ }
+ return &ConnectionExecReport{Response: string(out)}, nil
+}
+
+func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) {
+ host, remoteFile, localFile, swap, err := ParseScpArgs(options)
+ if err != nil {
+ return nil, err
+ }
+
+ _, uri, err := Validate(options.User, host, options.Port, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+ cfg, err := ValidateAndConfigure(uri, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+
+ dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect: %w", err)
+ }
+ sc, err := sftp.NewClient(dial)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := os.OpenFile(localFile, (os.O_RDWR | os.O_CREATE), 0o644)
+ if err != nil {
+ return nil, err
+ }
+
+ parent := filepath.Dir(remoteFile)
+ path := string(filepath.Separator)
+ dirs := strings.Split(parent, path)
+ for _, dir := range dirs {
+ path = filepath.Join(path, dir)
+ // ignore errors due to most of the dirs already existing
+ _ = sc.Mkdir(path)
+ }
+
+ remote, err := sc.OpenFile(remoteFile, (os.O_RDWR | os.O_CREATE))
+ if err != nil {
+ return nil, err
+ }
+ defer remote.Close()
+
+ if !swap {
+ _, err = io.Copy(remote, f)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ _, err = io.Copy(f, remote)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &ConnectionScpReport{Response: remote.Name()}, nil
+}
+
+// ExecRemoteCommand takes a ssh client connection and a command to run and executes the
+// command on the specified client. The function returns the Stdout from the client or the Stderr
+func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) {
+ sess, err := dial.NewSession() // new ssh client session
+ if err != nil {
+ return nil, err
+ }
+ defer sess.Close()
+
+ var buffer bytes.Buffer
+ var bufferErr bytes.Buffer
+ sess.Stdout = &buffer // output from client funneled into buffer
+ sess.Stderr = &bufferErr // err form client funneled into buffer
+ if err := sess.Run(run); err != nil { // run the command on the ssh client
+ return nil, fmt.Errorf("%v: %w", bufferErr.String(), err)
+ }
+ return buffer.Bytes(), nil
+}
+
+func GetUserInfo(uri *url.URL) (*url.Userinfo, error) {
+ var (
+ usr *user.User
+ err error
+ )
+ if u, found := os.LookupEnv("_CONTAINERS_ROOTLESS_UID"); found {
+ usr, err = user.LookupId(u)
+ if err != nil {
+ return nil, fmt.Errorf("failed to lookup rootless user: %w", err)
+ }
+ } else {
+ usr, err = user.Current()
+ if err != nil {
+ return nil, fmt.Errorf("failed to obtain current user: %w", err)
+ }
+ }
+
+ pw, set := uri.User.Password()
+ if set {
+ return url.UserPassword(usr.Username, pw), nil
+ }
+ return url.User(usr.Username), nil
+}
+
+// ValidateAndConfigure will take a ssh url and an identity key (rsa and the like) and ensure the information given is valid
+// iden iden can be blank to mean no identity key
+// once the function validates the information it creates and returns an ssh.ClientConfig.
+func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) {
+ var signers []ssh.Signer
+ passwd, passwdSet := uri.User.Password()
+ if iden != "" { // iden might be blank if coming from image scp or if no validation is needed
+ value := iden
+ s, err := PublicKey(value, []byte(passwd))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read identity %q: %w", value, err)
+ }
+ signers = append(signers, s)
+ logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ } else if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent.
+ logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock)
+
+ c, err := net.Dial("unix", sock)
+ if err != nil {
+ return nil, err
+ }
+ agentSigners, err := agent.NewClient(c).Signers()
+ if err != nil {
+ return nil, err
+ }
+
+ signers = append(signers, agentSigners...)
+
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ for _, s := range agentSigners {
+ logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ }
+ }
+ }
+ var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization
+ if len(signers) > 0 {
+ dedup := make(map[string]ssh.Signer)
+ for _, s := range signers {
+ fp := ssh.FingerprintSHA256(s.PublicKey())
+ if _, found := dedup[fp]; found {
+ logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+ }
+ dedup[fp] = s
+ }
+
+ var uniq []ssh.Signer
+ for _, s := range dedup {
+ uniq = append(uniq, s)
+ }
+ authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) {
+ return uniq, nil
+ }))
+ }
+ if passwdSet { // if password authentication is given and valid, add to the list
+ authMethods = append(authMethods, ssh.Password(passwd))
+ }
+ if len(authMethods) == 0 {
+ authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) {
+ pass, err := ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username()))
+ return string(pass), err
+ }))
+ }
+ tick, err := time.ParseDuration("40s")
+ if err != nil {
+ return nil, err
+ }
+ keyFilePath := filepath.Join(homedir.Get(), ".ssh", "known_hosts")
+ known, err := knownhosts.New(keyFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("error creating host key callback function for %s: %w", keyFilePath, err)
+ }
+
+ cfg := &ssh.ClientConfig{
+ User: uri.User.Username(),
+ Auth: authMethods,
+ HostKeyCallback: known,
+ Timeout: tick,
+ }
+ return cfg, nil
+}
+
+func getUDS(uri *url.URL, iden string) (string, error) {
+ cfg, err := ValidateAndConfigure(uri, iden)
+ if err != nil {
+ return "", fmt.Errorf("failed to validate: %w", err)
+ }
+ dial, err := ssh.Dial("tcp", uri.Host, cfg)
+ if err != nil {
+ return "", fmt.Errorf("failed to connect: %w", err)
+ }
+ defer dial.Close()
+
+ session, err := dial.NewSession()
+ if err != nil {
+ return "", fmt.Errorf("failed to create new ssh session on %q: %w", uri.Host, err)
+ }
+ defer session.Close()
+
+ // Override podman binary for testing etc
+ podman := "podman"
+ if v, found := os.LookupEnv("PODMAN_BINARY"); found {
+ podman = v
+ }
+ infoJSON, err := ExecRemoteCommand(dial, podman+" info --format=json")
+ if err != nil {
+ return "", err
+ }
+
+ var info Info
+ if err := json.Unmarshal(infoJSON, &info); err != nil {
+ return "", fmt.Errorf("failed to parse 'podman info' results: %w", err)
+ }
+
+ if info.Host.RemoteSocket == nil || len(info.Host.RemoteSocket.Path) == 0 {
+ return "", fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host)
+ }
+ return info.Host.RemoteSocket.Path, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go
new file mode 100644
index 000000000..4c407360a
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go
@@ -0,0 +1,182 @@
+package ssh
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/containers/common/pkg/config"
+)
+
+func nativeConnectionCreate(options ConnectionCreateOptions) error {
+ var match bool
+ var err error
+ if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil {
+ return fmt.Errorf("invalid destination: %w", err)
+ }
+
+ if !match {
+ options.Path = "ssh://" + options.Path
+ }
+
+ if len(options.Socket) > 0 {
+ options.Path += options.Socket
+ }
+
+ dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity)
+ if err != nil {
+ return err
+ }
+
+ // test connection
+ ssh, err := exec.LookPath("ssh")
+ if err != nil {
+ return fmt.Errorf("no ssh binary found")
+ }
+
+ if strings.Contains(uri.Host, "/run") {
+ uri.Host = strings.Split(uri.Host, "/run")[0]
+ }
+ conf, err := config.Default()
+ if err != nil {
+ return err
+ }
+
+ args := []string{uri.User.String() + "@" + uri.Hostname()}
+
+ if len(dst.Identity) > 0 {
+ args = append(args, "-i", dst.Identity)
+ }
+ if len(conf.Engine.SSHConfig) > 0 {
+ args = append(args, "-F", conf.Engine.SSHConfig)
+ }
+
+ output := &bytes.Buffer{}
+ args = append(args, "podman", "info", "--format", "json")
+ info := exec.Command(ssh, args...)
+ info.Stdout = output
+ err = info.Run()
+ if err != nil {
+ return err
+ }
+
+ remoteInfo := &Info{}
+ if err := json.Unmarshal(output.Bytes(), &remoteInfo); err != nil {
+ return fmt.Errorf("failed to parse 'podman info' results: %w", err)
+ }
+
+ if remoteInfo.Host.RemoteSocket == nil || len(remoteInfo.Host.RemoteSocket.Path) == 0 {
+ return fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host)
+ }
+
+ cfg, err := config.ReadCustomConfig()
+ if err != nil {
+ return err
+ }
+ if options.Default {
+ cfg.Engine.ActiveService = options.Name
+ }
+
+ if cfg.Engine.ServiceDestinations == nil {
+ cfg.Engine.ServiceDestinations = map[string]config.Destination{
+ options.Name: *dst,
+ }
+ cfg.Engine.ActiveService = options.Name
+ } else {
+ cfg.Engine.ServiceDestinations[options.Name] = *dst
+ }
+
+ return cfg.Write()
+}
+
+func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) {
+ dst, uri, err := Validate(options.User, options.Host, options.Port, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+
+ ssh, err := exec.LookPath("ssh")
+ if err != nil {
+ return nil, fmt.Errorf("no ssh binary found")
+ }
+
+ output := &bytes.Buffer{}
+ errors := &bytes.Buffer{}
+ if strings.Contains(uri.Host, "/run") {
+ uri.Host = strings.Split(uri.Host, "/run")[0]
+ }
+
+ options.Args = append([]string{uri.User.String() + "@" + uri.Hostname()}, options.Args...)
+ conf, err := config.Default()
+ if err != nil {
+ return nil, err
+ }
+
+ args := []string{}
+ if len(dst.Identity) > 0 {
+ args = append(args, "-i", dst.Identity)
+ }
+ if len(conf.Engine.SSHConfig) > 0 {
+ args = append(args, "-F", conf.Engine.SSHConfig)
+ }
+ args = append(args, options.Args...)
+ info := exec.Command(ssh, args...)
+ info.Stdout = output
+ info.Stderr = errors
+ err = info.Run()
+ if err != nil {
+ return nil, err
+ }
+ return &ConnectionExecReport{Response: output.String()}, nil
+}
+
+func nativeConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) {
+ host, remotePath, localPath, swap, err := ParseScpArgs(options)
+ if err != nil {
+ return nil, err
+ }
+ dst, uri, err := Validate(options.User, host, options.Port, options.Identity)
+ if err != nil {
+ return nil, err
+ }
+
+ scp, err := exec.LookPath("scp")
+ if err != nil {
+ return nil, fmt.Errorf("no scp binary found")
+ }
+
+ conf, err := config.Default()
+ if err != nil {
+ return nil, err
+ }
+
+ args := []string{}
+ if len(dst.Identity) > 0 {
+ args = append(args, "-i", dst.Identity)
+ }
+ if len(conf.Engine.SSHConfig) > 0 {
+ args = append(args, "-F", conf.Engine.SSHConfig)
+ }
+
+ userString := ""
+ if !strings.Contains(host, "@") {
+ userString = uri.User.String() + "@"
+ }
+ // meaning, we are copying from a remote host
+ if swap {
+ args = append(args, userString+host+":"+remotePath, localPath)
+ } else {
+ args = append(args, localPath, userString+host+":"+remotePath)
+ }
+
+ info := exec.Command(scp, args...)
+ err = info.Run()
+ if err != nil {
+ return nil, err
+ }
+
+ return &ConnectionScpReport{Response: remotePath}, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/ssh/ssh.go b/vendor/github.com/containers/common/pkg/ssh/ssh.go
new file mode 100644
index 000000000..d638d69ad
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/ssh/ssh.go
@@ -0,0 +1,59 @@
+package ssh
+
+import (
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func Create(options *ConnectionCreateOptions, kind EngineMode) error {
+ if kind == NativeMode {
+ return nativeConnectionCreate(*options)
+ }
+ return golangConnectionCreate(*options)
+}
+
+func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) {
+ var rep *ConnectionDialReport
+ var err error
+ if kind == NativeMode {
+ return nil, fmt.Errorf("ssh dial failed: you cannot create a dial-able client with native ssh")
+ }
+ rep, err = golangConnectionDial(*options)
+ if err != nil {
+ return nil, err
+ }
+ return rep.Client, nil
+}
+
+func Exec(options *ConnectionExecOptions, kind EngineMode) (string, error) {
+ var rep *ConnectionExecReport
+ var err error
+ if kind == NativeMode {
+ rep, err = nativeConnectionExec(*options)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ rep, err = golangConnectionExec(*options)
+ if err != nil {
+ return "", err
+ }
+ }
+ return rep.Response, nil
+}
+
+func Scp(options *ConnectionScpOptions, kind EngineMode) (string, error) {
+ var rep *ConnectionScpReport
+ var err error
+ if kind == NativeMode {
+ if rep, err = nativeConnectionScp(*options); err != nil {
+ return "", err
+ }
+ return rep.Response, nil
+ }
+ if rep, err = golangConnectionScp(*options); err != nil {
+ return "", err
+ }
+ return rep.Response, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go
new file mode 100644
index 000000000..f22b5fba9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/ssh/types.go
@@ -0,0 +1,223 @@
+package ssh
+
+import (
+ "net/url"
+ "time"
+
+ "github.com/containers/storage/pkg/idtools"
+ "golang.org/x/crypto/ssh"
+)
+
+type EngineMode string
+
+const (
+ NativeMode = EngineMode("native")
+ GolangMode = EngineMode("golang")
+ InvalidMode = EngineMode("invalid")
+)
+
+type ConnectionCreateOptions struct {
+ Name string
+ Path string
+ User *url.Userinfo
+ Port int
+ Identity string
+ Socket string
+ Default bool
+}
+
+type ConnectionDialOptions struct {
+ Host string
+ Identity string
+ User *url.Userinfo
+ Port int
+ Auth []string
+ Timeout time.Duration
+}
+
+type ConnectionDialReport struct {
+ Client *ssh.Client
+}
+
+type ConnectionExecOptions struct {
+ Host string
+ Identity string
+ User *url.Userinfo
+ Port int
+ Auth []string
+ Args []string
+ Timeout time.Duration
+}
+
+type ConnectionExecReport struct {
+ Response string
+}
+
+type ConnectionScpOptions struct {
+ User *url.Userinfo
+ Source string
+ Destination string
+ Identity string
+ Port int
+}
+
+type ConnectionScpReport struct {
+ Response string
+}
+
+// Info is the overall struct that describes the host system
+// running libpod/podman
+type Info struct {
+ Host *HostInfo `json:"host"`
+ Store *StoreInfo `json:"store"`
+ Registries map[string]interface{} `json:"registries"`
+ Plugins Plugins `json:"plugins"`
+ Version Version `json:"version"`
+}
+
+// Version is an output struct for API
+type Version struct {
+ APIVersion string
+ Version string
+ GoVersion string
+ GitCommit string
+ BuiltTime string
+ Built int64
+ OsArch string
+ Os string
+}
+
+// SecurityInfo describes the libpod host
+type SecurityInfo struct {
+ AppArmorEnabled bool `json:"apparmorEnabled"`
+ DefaultCapabilities string `json:"capabilities"`
+ Rootless bool `json:"rootless"`
+ SECCOMPEnabled bool `json:"seccompEnabled"`
+ SECCOMPProfilePath string `json:"seccompProfilePath"`
+ SELinuxEnabled bool `json:"selinuxEnabled"`
+}
+
+// HostInfo describes the libpod host
+type HostInfo struct {
+ Arch string `json:"arch"`
+ BuildahVersion string `json:"buildahVersion"`
+ CgroupManager string `json:"cgroupManager"`
+ CgroupsVersion string `json:"cgroupVersion"`
+ CgroupControllers []string `json:"cgroupControllers"`
+ Conmon *ConmonInfo `json:"conmon"`
+ CPUs int `json:"cpus"`
+ CPUUtilization *CPUUsage `json:"cpuUtilization"`
+ Distribution DistributionInfo `json:"distribution"`
+ EventLogger string `json:"eventLogger"`
+ Hostname string `json:"hostname"`
+ IDMappings IDMappings `json:"idMappings,omitempty"`
+ Kernel string `json:"kernel"`
+ LogDriver string `json:"logDriver"`
+ MemFree int64 `json:"memFree"`
+ MemTotal int64 `json:"memTotal"`
+ NetworkBackend string `json:"networkBackend"`
+ OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"`
+ OS string `json:"os"`
+ // RemoteSocket returns the UNIX domain socket the Podman service is listening on
+ RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"`
+ RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"`
+ // ServiceIsRemote is true when the podman/libpod service is remote to the client
+ ServiceIsRemote bool `json:"serviceIsRemote"`
+ Security SecurityInfo `json:"security"`
+ Slirp4NetNS SlirpInfo `json:"slirp4netns,omitempty"`
+ SwapFree int64 `json:"swapFree"`
+ SwapTotal int64 `json:"swapTotal"`
+ Uptime string `json:"uptime"`
+ Linkmode string `json:"linkmode"`
+}
+
+// RemoteSocket describes information about the API socket
+type RemoteSocket struct {
+ Path string `json:"path,omitempty"`
+ Exists bool `json:"exists,omitempty"`
+}
+
+// SlirpInfo describes the slirp executable that is being used
+type SlirpInfo struct {
+ Executable string `json:"executable"`
+ Package string `json:"package"`
+ Version string `json:"version"`
+}
+
+// IDMappings describe the GID and UID mappings
+type IDMappings struct {
+ GIDMap []idtools.IDMap `json:"gidmap"`
+ UIDMap []idtools.IDMap `json:"uidmap"`
+}
+
+// DistributionInfo describes the host distribution for libpod
+type DistributionInfo struct {
+ Distribution string `json:"distribution"`
+ Variant string `json:"variant,omitempty"`
+ Version string `json:"version"`
+ Codename string `json:"codename,omitempty"`
+}
+
+// ConmonInfo describes the conmon executable being used
+type ConmonInfo struct {
+ Package string `json:"package"`
+ Path string `json:"path"`
+ Version string `json:"version"`
+}
+
+// OCIRuntimeInfo describes the runtime (crun or runc) being
+// used with podman
+type OCIRuntimeInfo struct {
+ Name string `json:"name"`
+ Package string `json:"package"`
+ Path string `json:"path"`
+ Version string `json:"version"`
+}
+
+// StoreInfo describes the container storage and its
+// attributes
+type StoreInfo struct {
+ ConfigFile string `json:"configFile"`
+ ContainerStore ContainerStore `json:"containerStore"`
+ GraphDriverName string `json:"graphDriverName"`
+ GraphOptions map[string]interface{} `json:"graphOptions"`
+ GraphRoot string `json:"graphRoot"`
+ // GraphRootAllocated is how much space the graphroot has in bytes
+ GraphRootAllocated uint64 `json:"graphRootAllocated"`
+ // GraphRootUsed is how much of graphroot is used in bytes
+ GraphRootUsed uint64 `json:"graphRootUsed"`
+ GraphStatus map[string]string `json:"graphStatus"`
+ ImageCopyTmpDir string `json:"imageCopyTmpDir"`
+ ImageStore ImageStore `json:"imageStore"`
+ RunRoot string `json:"runRoot"`
+ VolumePath string `json:"volumePath"`
+}
+
+// ImageStore describes the image store. Right now only the number
+// of images present
+type ImageStore struct {
+ Number int `json:"number"`
+}
+
+// ContainerStore describes the quantity of containers in the
+// store by status
+type ContainerStore struct {
+ Number int `json:"number"`
+ Paused int `json:"paused"`
+ Running int `json:"running"`
+ Stopped int `json:"stopped"`
+}
+
+type Plugins struct {
+ Volume []string `json:"volume"`
+ Network []string `json:"network"`
+ Log []string `json:"log"`
+ // Authorization is provided for compatibility, will always be nil as Podman has no daemon
+ Authorization []string `json:"authorization"`
+}
+
+type CPUUsage struct {
+ UserPercent float64 `json:"userPercent"`
+ SystemPercent float64 `json:"systemPercent"`
+ IdlePercent float64 `json:"idlePercent"`
+}
diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go
new file mode 100644
index 000000000..c15745015
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/ssh/utils.go
@@ -0,0 +1,198 @@
+package ssh
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/common/pkg/config"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/term"
+)
+
+func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) {
+ sock := ""
+ if strings.Contains(path, "/run") {
+ sock = strings.Split(path, "/run")[1]
+ }
+ uri, err := url.Parse(path)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // sometimes we are not going to have a path, this breaks uri.Hostname()
+ if uri.Host == "" && strings.Contains(uri.String(), "@") {
+ uri.Host = strings.Split(uri.String(), "@")[1]
+ }
+
+ if uri.Port() == "" {
+ if port != 0 {
+ uri.Host = net.JoinHostPort(uri.Hostname(), strconv.Itoa(port))
+ } else {
+ uri.Host = net.JoinHostPort(uri.Hostname(), "22")
+ }
+ }
+
+ if user != nil {
+ uri.User = user
+ }
+
+ uriStr := ""
+ if len(sock) > 0 {
+ uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + "/run" + sock
+ } else {
+ uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host
+ }
+
+ dst := config.Destination{
+ URI: uriStr,
+ }
+
+ if len(identity) > 0 {
+ dst.Identity = identity
+ }
+ return &dst, uri, err
+}
+
+var (
+ passPhrase []byte
+ phraseSync sync.Once
+ password []byte
+ passwordSync sync.Once
+)
+
+// ReadPassword prompts for a secret and returns value input by user from stdin
+// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported.
+// Additionally, all input after `<secret>/n` is queued to podman command.
+func ReadPassword(prompt string) (pw []byte, err error) {
+ fd := int(os.Stdin.Fd())
+ if term.IsTerminal(fd) {
+ fmt.Fprint(os.Stderr, prompt)
+ pw, err = term.ReadPassword(fd)
+ fmt.Fprintln(os.Stderr)
+ return
+ }
+
+ var b [1]byte
+ for {
+ n, err := os.Stdin.Read(b[:])
+ // terminal.ReadPassword discards any '\r', so we do the same
+ if n > 0 && b[0] != '\r' {
+ if b[0] == '\n' {
+ return pw, nil
+ }
+ pw = append(pw, b[0])
+ // limit size, so that a wrong input won't fill up the memory
+ if len(pw) > 1024 {
+ err = fmt.Errorf("password too long, 1024 byte limit")
+ }
+ }
+ if err != nil {
+ // terminal.ReadPassword accepts EOF-terminated passwords
+ // if non-empty, so we do the same
+ if err == io.EOF && len(pw) > 0 {
+ err = nil
+ }
+ return pw, err
+ }
+ }
+}
+
+func PublicKey(path string, passphrase []byte) (ssh.Signer, error) {
+ key, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ signer, err := ssh.ParsePrivateKey(key)
+ if err != nil {
+ if _, ok := err.(*ssh.PassphraseMissingError); !ok {
+ return nil, err
+ }
+ if len(passphrase) == 0 {
+ passphrase = ReadPassphrase()
+ }
+ return ssh.ParsePrivateKeyWithPassphrase(key, passphrase)
+ }
+ return signer, nil
+}
+
+func ReadPassphrase() []byte {
+ phraseSync.Do(func() {
+ secret, err := ReadPassword("Key Passphrase: ")
+ if err != nil {
+ secret = []byte{}
+ }
+ passPhrase = secret
+ })
+ return passPhrase
+}
+
+func ReadLogin() []byte {
+ passwordSync.Do(func() {
+ secret, err := ReadPassword("Login password: ")
+ if err != nil {
+ secret = []byte{}
+ }
+ password = secret
+ })
+ return password
+}
+
+func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, error) {
+ // assume load to remote
+ host := options.Destination
+ if strings.Contains(host, "ssh://") {
+ host = strings.Split(host, "ssh://")[1]
+ }
+ localPath := options.Source
+ if strings.Contains(localPath, "ssh://") {
+ localPath = strings.Split(localPath, "ssh://")[1]
+ }
+ remotePath := ""
+ swap := false
+ if split := strings.Split(localPath, ":"); len(split) == 2 {
+ // save to remote, load to local
+ host = split[0]
+ remotePath = split[1]
+ localPath = options.Destination
+ swap = true
+ } else {
+ split = strings.Split(host, ":")
+ if len(split) != 2 {
+ return "", "", "", false, fmt.Errorf("no remote destination provided")
+ }
+ host = split[0]
+ remotePath = split[1]
+ }
+ remotePath = strings.TrimSuffix(remotePath, "\n")
+ return host, remotePath, localPath, swap, nil
+}
+
+func DialNet(sshClient *ssh.Client, mode string, url *url.URL) (net.Conn, error) {
+ port, err := strconv.Atoi(url.Port())
+ if err != nil {
+ return nil, err
+ }
+ if _, _, err = Validate(url.User, url.Hostname(), port, ""); err != nil {
+ return nil, err
+ }
+ return sshClient.Dial(mode, url.Path)
+}
+
+func DefineMode(flag string) EngineMode {
+ switch flag {
+ case "native":
+ return NativeMode
+ case "golang":
+ return GolangMode
+ default:
+ return InvalidMode
+ }
+}
diff --git a/vendor/github.com/dtylman/scp/.gitignore b/vendor/github.com/dtylman/scp/.gitignore
deleted file mode 100644
index 6e1690ed6..000000000
--- a/vendor/github.com/dtylman/scp/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-example/example
diff --git a/vendor/github.com/dtylman/scp/LICENSE b/vendor/github.com/dtylman/scp/LICENSE
deleted file mode 100644
index 6565de59d..000000000
--- a/vendor/github.com/dtylman/scp/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2016 Danny
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/dtylman/scp/README.md b/vendor/github.com/dtylman/scp/README.md
deleted file mode 100644
index 48cfefe02..000000000
--- a/vendor/github.com/dtylman/scp/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# scp
-
-[![Go Report Card](https://goreportcard.com/badge/github.com/dtylman/scp)](https://goreportcard.com/report/github.com/dtylman/scp)
-
-A Simple `go` SCP client library.
-
-## Usage
-
-```go
-import (
- "github.com/dtylman/scp"
- "golang.org/x/crypto/ssh"
-)
-```
-
-## Sending Files
-
-Copies `/var/log/messages` to remote `/tmp/lala`:
-
-```go
-var sc* ssh.Client
-// establish ssh connection into sc here...
-n,err:=scp.CopyTo(sc, "/var/log/messages", "/tmp/lala")
-if err==nil{
- fmt.Printf("Sent %v bytes",n)
-}
-```
-
-## Receiving Files
-
-Copies remote `/var/log/message` to local `/tmp/lala`:
-
-```go
-var sc* ssh.Client
-// establish ssh connection into sc here...
-n,err:=scp.CopyFrom(sc, "/var/log/message", "/tmp/lala")
-if err==nil{
- fmt.Printf("Sent %v bytes",n)
-}
-```
-
-
diff --git a/vendor/github.com/dtylman/scp/msg.go b/vendor/github.com/dtylman/scp/msg.go
deleted file mode 100644
index 6dfc53535..000000000
--- a/vendor/github.com/dtylman/scp/msg.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package scp
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-const (
- //CopyMessage Copy Message Opcode
- CopyMessage = 'C'
- //ErrorMessage Error OpCode
- ErrorMessage = 0x1
- //WarnMessage Warning Opcode
- WarnMessage = 0x2
-)
-
-//Message is scp control message
-type Message struct {
- Type byte
- Error error
- Mode string
- Size int64
- FileName string
-}
-
-func (m *Message) readByte(reader io.Reader) (byte, error) {
- buff := make([]byte, 1)
- _, err := io.ReadFull(reader, buff)
- if err != nil {
- return 0, err
- }
- return buff[0], nil
-
-}
-
-func (m *Message) readOpCode(reader io.Reader) error {
- var err error
- m.Type, err = m.readByte(reader)
- return err
-}
-
-//ReadError reads an error message
-func (m *Message) ReadError(reader io.Reader) error {
- msg, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- m.Error = errors.New(strings.TrimSpace(string(msg)))
- return nil
-}
-
-func (m *Message) readLine(reader io.Reader) (string, error) {
- line := ""
- b, err := m.readByte(reader)
- if err != nil {
- return "", err
- }
- for b != 10 {
- line += string(b)
- b, err = m.readByte(reader)
- if err != nil {
- return "", err
- }
- }
- return line, nil
-}
-
-func (m *Message) readCopy(reader io.Reader) error {
- line, err := m.readLine(reader)
- if err != nil {
- return err
- }
- parts := strings.Split(line, " ")
- if len(parts) < 2 {
- return errors.New("Invalid copy line: " + line)
- }
- m.Mode = parts[0]
- m.Size, err = strconv.ParseInt(parts[1], 10, 0)
- if err != nil {
- return err
- }
- m.FileName = parts[2]
- return nil
-}
-
-//ReadFrom reads message from reader
-func (m *Message) ReadFrom(reader io.Reader) (int64, error) {
- err := m.readOpCode(reader)
- if err != nil {
- return 0, err
- }
- switch m.Type {
- case CopyMessage:
- err = m.readCopy(reader)
- if err != nil {
- return 0, err
- }
- case ErrorMessage, WarnMessage:
- err = m.ReadError(reader)
- if err != nil {
- return 0, err
- }
- default:
- return 0, fmt.Errorf("Unsupported opcode: %v", m.Type)
- }
- return m.Size, nil
-}
-
-//NewMessageFromReader constructs a new message from a data in reader
-func NewMessageFromReader(reader io.Reader) (*Message, error) {
- m := new(Message)
- _, err := m.ReadFrom(reader)
- if err != nil {
- return nil, err
- }
- return m, nil
-}
diff --git a/vendor/github.com/dtylman/scp/scp.go b/vendor/github.com/dtylman/scp/scp.go
deleted file mode 100644
index 841c16965..000000000
--- a/vendor/github.com/dtylman/scp/scp.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package scp
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
-
- log "github.com/sirupsen/logrus"
- "golang.org/x/crypto/ssh"
-)
-
-const (
- fileMode = "0644"
- buffSize = 1024 * 256
-)
-
-//CopyTo copy from local to remote
-func CopyTo(sshClient *ssh.Client, local string, remote string) (int64, error) {
- session, err := sshClient.NewSession()
- if err != nil {
- return 0, err
- }
- defer session.Close()
- stderr := &bytes.Buffer{}
- session.Stderr = stderr
- stdout := &bytes.Buffer{}
- session.Stdout = stdout
- writer, err := session.StdinPipe()
- if err != nil {
- return 0, err
- }
- defer writer.Close()
- err = session.Start("scp -t " + filepath.Dir(remote))
- if err != nil {
- return 0, err
- }
-
- localFile, err := os.Open(local)
- if err != nil {
- return 0, err
- }
- fileInfo, err := localFile.Stat()
- if err != nil {
- return 0, err
- }
- _, err = fmt.Fprintf(writer, "C%s %d %s\n", fileMode, fileInfo.Size(), filepath.Base(remote))
- if err != nil {
- return 0, err
- }
- n, err := copyN(writer, localFile, fileInfo.Size())
- if err != nil {
- return 0, err
- }
- err = ack(writer)
- if err != nil {
- return 0, err
- }
-
- err = session.Wait()
- log.Debugf("Copied %v bytes out of %v. err: %v stdout:%v. stderr:%v", n, fileInfo.Size(), err, stdout, stderr)
- //NOTE: Process exited with status 1 is not an error, it just how scp work. (waiting for the next control message and we send EOF)
- return n, nil
-}
-
-//CopyFrom copy from remote to local
-func CopyFrom(sshClient *ssh.Client, remote string, local string) (int64, error) {
- session, err := sshClient.NewSession()
- if err != nil {
- return 0, err
- }
- defer session.Close()
- stderr := &bytes.Buffer{}
- session.Stderr = stderr
- writer, err := session.StdinPipe()
- if err != nil {
- return 0, err
- }
- defer writer.Close()
- reader, err := session.StdoutPipe()
- if err != nil {
- return 0, err
- }
- err = session.Start("scp -f " + remote)
- if err != nil {
- return 0, err
- }
- err = ack(writer)
- if err != nil {
- return 0, err
- }
- msg, err := NewMessageFromReader(reader)
- if err != nil {
- return 0, err
- }
- if msg.Type == ErrorMessage || msg.Type == WarnMessage {
- return 0, msg.Error
- }
- log.Debugf("Receiving %v", msg)
-
- err = ack(writer)
- if err != nil {
- return 0, err
- }
- outFile, err := os.Create(local)
- if err != nil {
- return 0, err
- }
- defer outFile.Close()
- n, err := copyN(outFile, reader, msg.Size)
- if err != nil {
- return 0, err
- }
- err = outFile.Sync()
- if err != nil {
- return 0, err
- }
- err = outFile.Close()
- if err != nil {
- return 0, err
- }
- err = session.Wait()
- log.Debugf("Copied %v bytes out of %v. err: %v stderr:%v", n, msg.Size, err, stderr)
- return n, nil
-}
-
-func ack(writer io.Writer) error {
- var msg = []byte{0, 0, 10, 13}
- n, err := writer.Write(msg)
- if err != nil {
- return err
- }
- if n < len(msg) {
- return errors.New("Failed to write ack buffer")
- }
- return nil
-}
-
-func copyN(writer io.Writer, src io.Reader, size int64) (int64, error) {
- reader := io.LimitReader(src, size)
- var total int64
- for total < size {
- n, err := io.CopyBuffer(writer, reader, make([]byte, buffSize))
- log.Debugf("Copied chunk %v total: %v out of %v err: %v ", n, total, size, err)
- if err != nil {
- return 0, err
- }
- total += n
- }
- return total, nil
-}
diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/kr/fs/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme
new file mode 100644
index 000000000..c95e13fc8
--- /dev/null
+++ b/vendor/github.com/kr/fs/Readme
@@ -0,0 +1,3 @@
+Filesystem Package
+
+http://godoc.org/github.com/kr/fs
diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go
new file mode 100644
index 000000000..f1c4805fb
--- /dev/null
+++ b/vendor/github.com/kr/fs/filesystem.go
@@ -0,0 +1,36 @@
+package fs
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// FileSystem defines the methods of an abstract filesystem.
+type FileSystem interface {
+
+ // ReadDir reads the directory named by dirname and returns a
+ // list of directory entries.
+ ReadDir(dirname string) ([]os.FileInfo, error)
+
+ // Lstat returns a FileInfo describing the named file. If the file is a
+ // symbolic link, the returned FileInfo describes the symbolic link. Lstat
+ // makes no attempt to follow the link.
+ Lstat(name string) (os.FileInfo, error)
+
+ // Join joins any number of path elements into a single path, adding a
+ // separator if necessary. The result is Cleaned; in particular, all
+ // empty strings are ignored.
+ //
+ // The separator is FileSystem specific.
+ Join(elem ...string) string
+}
+
+// fs represents a FileSystem provided by the os package.
+type fs struct{}
+
+func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) }
+
+func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
+
+func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) }
diff --git a/vendor/github.com/kr/fs/go.mod b/vendor/github.com/kr/fs/go.mod
new file mode 100644
index 000000000..7c206e04c
--- /dev/null
+++ b/vendor/github.com/kr/fs/go.mod
@@ -0,0 +1 @@
+module "github.com/kr/fs"
diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go
new file mode 100644
index 000000000..6ffa1e0b2
--- /dev/null
+++ b/vendor/github.com/kr/fs/walk.go
@@ -0,0 +1,95 @@
+// Package fs provides filesystem-related functions.
+package fs
+
+import (
+ "os"
+)
+
+// Walker provides a convenient interface for iterating over the
+// descendants of a filesystem path.
+// Successive calls to the Step method will step through each
+// file or directory in the tree, including the root. The files
+// are walked in lexical order, which makes the output deterministic
+// but means that for very large directories Walker can be inefficient.
+// Walker does not follow symbolic links.
+type Walker struct {
+ fs FileSystem
+ cur item
+ stack []item
+ descend bool
+}
+
+type item struct {
+ path string
+ info os.FileInfo
+ err error
+}
+
+// Walk returns a new Walker rooted at root.
+func Walk(root string) *Walker {
+ return WalkFS(root, new(fs))
+}
+
+// WalkFS returns a new Walker rooted at root on the FileSystem fs.
+func WalkFS(root string, fs FileSystem) *Walker {
+ info, err := fs.Lstat(root)
+ return &Walker{
+ fs: fs,
+ stack: []item{{root, info, err}},
+ }
+}
+
+// Step advances the Walker to the next file or directory,
+// which will then be available through the Path, Stat,
+// and Err methods.
+// It returns false when the walk stops at the end of the tree.
+func (w *Walker) Step() bool {
+ if w.descend && w.cur.err == nil && w.cur.info.IsDir() {
+ list, err := w.fs.ReadDir(w.cur.path)
+ if err != nil {
+ w.cur.err = err
+ w.stack = append(w.stack, w.cur)
+ } else {
+ for i := len(list) - 1; i >= 0; i-- {
+ path := w.fs.Join(w.cur.path, list[i].Name())
+ w.stack = append(w.stack, item{path, list[i], nil})
+ }
+ }
+ }
+
+ if len(w.stack) == 0 {
+ return false
+ }
+ i := len(w.stack) - 1
+ w.cur = w.stack[i]
+ w.stack = w.stack[:i]
+ w.descend = true
+ return true
+}
+
+// Path returns the path to the most recent file or directory
+// visited by a call to Step. It contains the argument to Walk
+// as a prefix; that is, if Walk is called with "dir", which is
+// a directory containing the file "a", Path will return "dir/a".
+func (w *Walker) Path() string {
+ return w.cur.path
+}
+
+// Stat returns info for the most recent file or directory
+// visited by a call to Step.
+func (w *Walker) Stat() os.FileInfo {
+ return w.cur.info
+}
+
+// Err returns the error, if any, for the most recent attempt
+// by Step to visit a file or directory. If a directory has
+// an error, w will not descend into that directory.
+func (w *Walker) Err() error {
+ return w.cur.err
+}
+
+// SkipDir causes the currently visited directory to be skipped.
+// If w is not on a directory, SkipDir has no effect.
+func (w *Walker) SkipDir() {
+ w.descend = false
+}
diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore
new file mode 100644
index 000000000..caf2dca22
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/.gitignore
@@ -0,0 +1,10 @@
+.*.swo
+.*.swp
+
+server_standalone/server_standalone
+
+examples/*/id_rsa
+examples/*/id_rsa.pub
+
+memprofile.out
+memprofile.svg
diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS
new file mode 100644
index 000000000..5c7196ae6
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/CONTRIBUTORS
@@ -0,0 +1,3 @@
+Dave Cheney <dave@cheney.net>
+Saulius Gurklys <s4uliu5@gmail.com>
+John Eikenberry <jae@zhar.net>
diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE
new file mode 100644
index 000000000..b7b53921e
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2013, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/sftp/Makefile b/vendor/github.com/pkg/sftp/Makefile
new file mode 100644
index 000000000..4d3a00799
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/Makefile
@@ -0,0 +1,27 @@
+.PHONY: integration integration_w_race benchmark
+
+integration:
+ go test -integration -v ./...
+ go test -testserver -v ./...
+ go test -integration -testserver -v ./...
+ go test -integration -allocator -v ./...
+ go test -testserver -allocator -v ./...
+ go test -integration -testserver -allocator -v ./...
+
+integration_w_race:
+ go test -race -integration -v ./...
+ go test -race -testserver -v ./...
+ go test -race -integration -testserver -v ./...
+ go test -race -integration -allocator -v ./...
+ go test -race -testserver -allocator -v ./...
+ go test -race -integration -allocator -testserver -v ./...
+
+COUNT ?= 1
+BENCHMARK_PATTERN ?= "."
+
+benchmark:
+ go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT)
+
+benchmark_w_memprofile:
+ go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out
+ go tool pprof -svg -output=memprofile.svg memprofile.out
diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md
new file mode 100644
index 000000000..5e78cd396
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/README.md
@@ -0,0 +1,44 @@
+sftp
+----
+
+The `sftp` package provides support for file system operations on remote ssh
+servers using the SFTP subsystem. It also implements an SFTP server for serving
+files from the filesystem.
+
+![CI Status](https://github.com/pkg/sftp/workflows/CI/badge.svg?branch=master&event=push) [![Go Reference](https://pkg.go.dev/badge/github.com/pkg/sftp.svg)](https://pkg.go.dev/github.com/pkg/sftp)
+
+usage and examples
+------------------
+
+See [https://pkg.go.dev/github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) for
+examples and usage.
+
+The basic operation of the package mirrors the facilities of the
+[os](http://golang.org/pkg/os) package.
+
+The Walker interface for directory traversal is heavily inspired by Keith
+Rarick's [fs](https://pkg.go.dev/github.com/kr/fs) package.
+
+roadmap
+-------
+
+* There is way too much duplication in the Client methods. If there was an
+ unmarshal(interface{}) method this would reduce a heap of the duplication.
+
+contributing
+------------
+
+We welcome pull requests, bug fixes and issue reports.
+
+Before proposing a large change, first please discuss your change by raising an
+issue.
+
+For API/code bugs, please include a small, self contained code example to
+reproduce the issue. For pull requests, remember test coverage.
+
+We try to handle issues and pull requests with a 0 open philosophy. That means
+we will try to address the submission as soon as possible and will work toward
+a resolution. If progress can no longer be made (eg. unreproducible bug) or
+stops (eg. unresponsive submitter), we will close the bug.
+
+Thanks.
diff --git a/vendor/github.com/pkg/sftp/allocator.go b/vendor/github.com/pkg/sftp/allocator.go
new file mode 100644
index 000000000..3e67e5433
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/allocator.go
@@ -0,0 +1,96 @@
+package sftp
+
+import (
+ "sync"
+)
+
+type allocator struct {
+ sync.Mutex
+ available [][]byte
+ // map key is the request order
+ used map[uint32][][]byte
+}
+
+func newAllocator() *allocator {
+ return &allocator{
+ // micro optimization: initialize available pages with an initial capacity
+ available: make([][]byte, 0, SftpServerWorkerCount*2),
+ used: make(map[uint32][][]byte),
+ }
+}
+
+// GetPage returns a previously allocated and unused []byte or create a new one.
+// The slice have a fixed size = maxMsgLength, this value is suitable for both
+// receiving new packets and reading the files to serve
+func (a *allocator) GetPage(requestOrderID uint32) []byte {
+ a.Lock()
+ defer a.Unlock()
+
+ var result []byte
+
+ // get an available page and remove it from the available ones.
+ if len(a.available) > 0 {
+ truncLength := len(a.available) - 1
+ result = a.available[truncLength]
+
+ a.available[truncLength] = nil // clear out the internal pointer
+ a.available = a.available[:truncLength] // truncate the slice
+ }
+
+ // no preallocated slice found, just allocate a new one
+ if result == nil {
+ result = make([]byte, maxMsgLength)
+ }
+
+ // put result in used pages
+ a.used[requestOrderID] = append(a.used[requestOrderID], result)
+
+ return result
+}
+
+// ReleasePages marks unused all pages in use for the given requestID
+func (a *allocator) ReleasePages(requestOrderID uint32) {
+ a.Lock()
+ defer a.Unlock()
+
+ if used := a.used[requestOrderID]; len(used) > 0 {
+ a.available = append(a.available, used...)
+ }
+ delete(a.used, requestOrderID)
+}
+
+// Free removes all the used and available pages.
+// Call this method when the allocator is not needed anymore
+func (a *allocator) Free() {
+ a.Lock()
+ defer a.Unlock()
+
+ a.available = nil
+ a.used = make(map[uint32][][]byte)
+}
+
+func (a *allocator) countUsedPages() int {
+ a.Lock()
+ defer a.Unlock()
+
+ num := 0
+ for _, p := range a.used {
+ num += len(p)
+ }
+ return num
+}
+
+func (a *allocator) countAvailablePages() int {
+ a.Lock()
+ defer a.Unlock()
+
+ return len(a.available)
+}
+
+func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool {
+ a.Lock()
+ defer a.Unlock()
+
+ _, ok := a.used[requestOrderID]
+ return ok
+}
diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go
new file mode 100644
index 000000000..2bb2d5764
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/attrs.go
@@ -0,0 +1,90 @@
+package sftp
+
+// ssh_FXP_ATTRS support
+// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
+
+import (
+ "os"
+ "time"
+)
+
+const (
+ sshFileXferAttrSize = 0x00000001
+ sshFileXferAttrUIDGID = 0x00000002
+ sshFileXferAttrPermissions = 0x00000004
+ sshFileXferAttrACmodTime = 0x00000008
+ sshFileXferAttrExtended = 0x80000000
+
+ sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions |
+ sshFileXferAttrACmodTime | sshFileXferAttrExtended
+)
+
+// fileInfo is an artificial type designed to satisfy os.FileInfo.
+type fileInfo struct {
+ name string
+ stat *FileStat
+}
+
+// Name returns the base name of the file.
+func (fi *fileInfo) Name() string { return fi.name }
+
+// Size returns the length in bytes for regular files; system-dependent for others.
+func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) }
+
+// Mode returns file mode bits.
+func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) }
+
+// ModTime returns the last modification time of the file.
+func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) }
+
+// IsDir returns true if the file is a directory.
+func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() }
+
+func (fi *fileInfo) Sys() interface{} { return fi.stat }
+
+// FileStat holds the original unmarshalled values from a call to READDIR or
+// *STAT. It is exported for the purposes of accessing the raw values via
+// os.FileInfo.Sys(). It is also used server side to store the unmarshalled
+// values for SetStat.
+type FileStat struct {
+ Size uint64
+ Mode uint32
+ Mtime uint32
+ Atime uint32
+ UID uint32
+ GID uint32
+ Extended []StatExtended
+}
+
+// StatExtended contains additional, extended information for a FileStat.
+type StatExtended struct {
+ ExtType string
+ ExtData string
+}
+
+func fileInfoFromStat(stat *FileStat, name string) os.FileInfo {
+ return &fileInfo{
+ name: name,
+ stat: stat,
+ }
+}
+
+func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) {
+ mtime := fi.ModTime().Unix()
+ atime := mtime
+ var flags uint32 = sshFileXferAttrSize |
+ sshFileXferAttrPermissions |
+ sshFileXferAttrACmodTime
+
+ fileStat := &FileStat{
+ Size: uint64(fi.Size()),
+ Mode: fromFileMode(fi.Mode()),
+ Mtime: uint32(mtime),
+ Atime: uint32(atime),
+ }
+
+ // os specific file stat decoding
+ fileStatFromInfoOs(fi, &flags, fileStat)
+
+ return flags, fileStat
+}
diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go
new file mode 100644
index 000000000..c01f33677
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/attrs_stubs.go
@@ -0,0 +1,11 @@
+// +build plan9 windows android
+
+package sftp
+
+import (
+ "os"
+)
+
+func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
+ // todo
+}
diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go
new file mode 100644
index 000000000..d1f445241
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/attrs_unix.go
@@ -0,0 +1,16 @@
+// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js
+
+package sftp
+
+import (
+ "os"
+ "syscall"
+)
+
+func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
+ if statt, ok := fi.Sys().(*syscall.Stat_t); ok {
+ *flags |= sshFileXferAttrUIDGID
+ fileStat.UID = statt.Uid
+ fileStat.GID = statt.Gid
+ }
+}
diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go
new file mode 100644
index 000000000..9e0b61645
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/client.go
@@ -0,0 +1,1977 @@
+package sftp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "os"
+ "path"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/kr/fs"
+ "golang.org/x/crypto/ssh"
+)
+
+var (
+ // ErrInternalInconsistency indicates the packets sent and the data queued to be
+ // written to the file don't match up. It is an unusual error and usually is
+ // caused by bad behavior server side or connection issues. The error is
+ // limited in scope to the call where it happened, the client object is still
+ // OK to use as long as the connection is still open.
+ ErrInternalInconsistency = errors.New("internal inconsistency")
+ // InternalInconsistency alias for ErrInternalInconsistency.
+ //
+ // Deprecated: please use ErrInternalInconsistency
+ InternalInconsistency = ErrInternalInconsistency
+)
+
+// A ClientOption is a function which applies configuration to a Client.
+type ClientOption func(*Client) error
+
+// MaxPacketChecked sets the maximum size of the payload, measured in bytes.
+// This option only accepts sizes servers should support, ie. <= 32768 bytes.
+//
+// If you get the error "failed to send packet header: EOF" when copying a
+// large file, try lowering this number.
+//
+// The default packet size is 32768 bytes.
+func MaxPacketChecked(size int) ClientOption {
+ return func(c *Client) error {
+ if size < 1 {
+ return errors.New("size must be greater or equal to 1")
+ }
+ if size > 32768 {
+ return errors.New("sizes larger than 32KB might not work with all servers")
+ }
+ c.maxPacket = size
+ return nil
+ }
+}
+
+// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes.
+// It accepts sizes larger than the 32768 bytes all servers should support.
+// Only use a setting higher than 32768 if your application always connects to
+// the same server or after sufficiently broad testing.
+//
+// If you get the error "failed to send packet header: EOF" when copying a
+// large file, try lowering this number.
+//
+// The default packet size is 32768 bytes.
+func MaxPacketUnchecked(size int) ClientOption {
+ return func(c *Client) error {
+ if size < 1 {
+ return errors.New("size must be greater or equal to 1")
+ }
+ c.maxPacket = size
+ return nil
+ }
+}
+
+// MaxPacket sets the maximum size of the payload, measured in bytes.
+// This option only accepts sizes servers should support, ie. <= 32768 bytes.
+// This is a synonym for MaxPacketChecked that provides backward compatibility.
+//
+// If you get the error "failed to send packet header: EOF" when copying a
+// large file, try lowering this number.
+//
+// The default packet size is 32768 bytes.
+func MaxPacket(size int) ClientOption {
+ return MaxPacketChecked(size)
+}
+
+// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file.
+//
+// The default maximum concurrent requests is 64.
+func MaxConcurrentRequestsPerFile(n int) ClientOption {
+ return func(c *Client) error {
+ if n < 1 {
+ return errors.New("n must be greater or equal to 1")
+ }
+ c.maxConcurrentRequests = n
+ return nil
+ }
+}
+
+// UseConcurrentWrites allows the Client to perform concurrent Writes.
+//
+// Using concurrency while doing writes, requires special consideration.
+// A write to a later offset in a file after an error,
+// could end up with a file length longer than what was successfully written.
+//
+// When using this option, if you receive an error during `io.Copy` or `io.WriteTo`,
+// you may need to `Truncate` the target Writer to avoid “holes” in the data written.
+func UseConcurrentWrites(value bool) ClientOption {
+ return func(c *Client) error {
+ c.useConcurrentWrites = value
+ return nil
+ }
+}
+
+// UseConcurrentReads allows the Client to perform concurrent Reads.
+//
+// Concurrent reads are generally safe to use and not using them will degrade
+// performance, so this option is enabled by default.
+//
+// When enabled, WriteTo will use Stat/Fstat to get the file size and determines
+// how many concurrent workers to use.
+// Some "read once" servers will delete the file if they receive a stat call on an
+// open file and then the download will fail.
+// Disabling concurrent reads you will be able to download files from these servers.
+// If concurrent reads are disabled, the UseFstat option is ignored.
+func UseConcurrentReads(value bool) ClientOption {
+ return func(c *Client) error {
+ c.disableConcurrentReads = !value
+ return nil
+ }
+}
+
+// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called
+// (usually when copying files).
+// Some servers limit the amount of open files and calling Stat after opening
+// the file will throw an error From the server. Setting this flag will call
+// Fstat instead of Stat which is supposed to be called on an open file handle.
+//
+// It has been found that with IBM Sterling SFTP servers which have
+// "extractability" level set to 1 which means only 1 file can be opened at
+// any given time.
+//
+// If the server you are working with still has an issue with both Stat and
+// Fstat calls you can always open a file and read it until the end.
+//
+// Another reason to read the file until its end and Fstat doesn't work is
+// that in some servers, reading a full file will automatically delete the
+// file as some of these mainframes map the file to a message in a queue.
+// Once the file has been read it will get deleted.
+func UseFstat(value bool) ClientOption {
+ return func(c *Client) error {
+ c.useFstat = value
+ return nil
+ }
+}
+
+// Client represents an SFTP session on a *ssh.ClientConn SSH connection.
+// Multiple Clients can be active on a single SSH connection, and a Client
+// may be called concurrently from multiple Goroutines.
+//
+// Client implements the github.com/kr/fs.FileSystem interface.
+type Client struct {
+ clientConn
+
+ ext map[string]string // Extensions (name -> data).
+
+ maxPacket int // max packet size read or written.
+ maxConcurrentRequests int
+ nextid uint32
+
+ // write concurrency is… error prone.
+ // Default behavior should be to not use it.
+ useConcurrentWrites bool
+ useFstat bool
+ disableConcurrentReads bool
+}
+
+// NewClient creates a new SFTP client on conn, using zero or more option
+// functions.
+func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) {
+ s, err := conn.NewSession()
+ if err != nil {
+ return nil, err
+ }
+ if err := s.RequestSubsystem("sftp"); err != nil {
+ return nil, err
+ }
+ pw, err := s.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ pr, err := s.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewClientPipe(pr, pw, opts...)
+}
+
+// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
+// This can be used for connecting to an SFTP server over TCP/TLS or by using
+// the system's ssh client program (e.g. via exec.Command).
+func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) {
+ sftp := &Client{
+ clientConn: clientConn{
+ conn: conn{
+ Reader: rd,
+ WriteCloser: wr,
+ },
+ inflight: make(map[uint32]chan<- result),
+ closed: make(chan struct{}),
+ },
+
+ ext: make(map[string]string),
+
+ maxPacket: 1 << 15,
+ maxConcurrentRequests: 64,
+ }
+
+ for _, opt := range opts {
+ if err := opt(sftp); err != nil {
+ wr.Close()
+ return nil, err
+ }
+ }
+
+ if err := sftp.sendInit(); err != nil {
+ wr.Close()
+ return nil, err
+ }
+ if err := sftp.recvVersion(); err != nil {
+ wr.Close()
+ return nil, err
+ }
+
+ sftp.clientConn.wg.Add(1)
+ go sftp.loop()
+
+ return sftp, nil
+}
+
+// Create creates the named file mode 0666 (before umask), truncating it if it
+// already exists. If successful, methods on the returned File can be used for
+// I/O; the associated file descriptor has mode O_RDWR. If you need more
+// control over the flags/mode used to open the file see client.OpenFile.
+//
+// Note that some SFTP servers (eg. AWS Transfer) do not support opening files
+// read/write at the same time. For those services you will need to use
+// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`.
+func (c *Client) Create(path string) (*File, error) {
+ return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
+}
+
+const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+
+func (c *Client) sendInit() error {
+ return c.clientConn.conn.sendPacket(&sshFxInitPacket{
+ Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+ })
+}
+
+// returns the next value of c.nextid
+func (c *Client) nextID() uint32 {
+ return atomic.AddUint32(&c.nextid, 1)
+}
+
+func (c *Client) recvVersion() error {
+ typ, data, err := c.recvPacket(0)
+ if err != nil {
+ return err
+ }
+ if typ != sshFxpVersion {
+ return &unexpectedPacketErr{sshFxpVersion, typ}
+ }
+
+ version, data, err := unmarshalUint32Safe(data)
+ if err != nil {
+ return err
+ }
+ if version != sftpProtocolVersion {
+ return &unexpectedVersionErr{sftpProtocolVersion, version}
+ }
+
+ for len(data) > 0 {
+ var ext extensionPair
+ ext, data, err = unmarshalExtensionPair(data)
+ if err != nil {
+ return err
+ }
+ c.ext[ext.Name] = ext.Data
+ }
+
+ return nil
+}
+
+// HasExtension checks whether the server supports a named extension.
+//
+// The first return value is the extension data reported by the server
+// (typically a version number).
+func (c *Client) HasExtension(name string) (string, bool) {
+ data, ok := c.ext[name]
+ return data, ok
+}
+
+// Walk returns a new Walker rooted at root.
+func (c *Client) Walk(root string) *fs.Walker {
+ return fs.WalkFS(root, c)
+}
+
+// ReadDir reads the directory named by dirname and returns a list of
+// directory entries.
+func (c *Client) ReadDir(p string) ([]os.FileInfo, error) {
+ handle, err := c.opendir(p)
+ if err != nil {
+ return nil, err
+ }
+ defer c.close(handle) // this has to defer earlier than the lock below
+ var attrs []os.FileInfo
+ var done = false
+ for !done {
+ id := c.nextID()
+ typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{
+ ID: id,
+ Handle: handle,
+ })
+ if err1 != nil {
+ err = err1
+ done = true
+ break
+ }
+ switch typ {
+ case sshFxpName:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return nil, &unexpectedIDErr{id, sid}
+ }
+ count, data := unmarshalUint32(data)
+ for i := uint32(0); i < count; i++ {
+ var filename string
+ filename, data = unmarshalString(data)
+ _, data = unmarshalString(data) // discard longname
+ var attr *FileStat
+ attr, data = unmarshalAttrs(data)
+ if filename == "." || filename == ".." {
+ continue
+ }
+ attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename)))
+ }
+ case sshFxpStatus:
+ // TODO(dfc) scope warning!
+ err = normaliseError(unmarshalStatus(id, data))
+ done = true
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return attrs, err
+}
+
+func (c *Client) opendir(path string) (string, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return "", err
+ }
+ switch typ {
+ case sshFxpHandle:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return "", &unexpectedIDErr{id, sid}
+ }
+ handle, _ := unmarshalString(data)
+ return handle, nil
+ case sshFxpStatus:
+ return "", normaliseError(unmarshalStatus(id, data))
+ default:
+ return "", unimplementedPacketErr(typ)
+ }
+}
+
+// Stat returns a FileInfo structure describing the file specified by path 'p'.
+// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
+func (c *Client) Stat(p string) (os.FileInfo, error) {
+ fs, err := c.stat(p)
+ if err != nil {
+ return nil, err
+ }
+ return fileInfoFromStat(fs, path.Base(p)), nil
+}
+
+// Lstat returns a FileInfo structure describing the file specified by path 'p'.
+// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
+func (c *Client) Lstat(p string) (os.FileInfo, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{
+ ID: id,
+ Path: p,
+ })
+ if err != nil {
+ return nil, err
+ }
+ switch typ {
+ case sshFxpAttrs:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return nil, &unexpectedIDErr{id, sid}
+ }
+ attr, _ := unmarshalAttrs(data)
+ return fileInfoFromStat(attr, path.Base(p)), nil
+ case sshFxpStatus:
+ return nil, normaliseError(unmarshalStatus(id, data))
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+}
+
+// ReadLink reads the target of a symbolic link.
+func (c *Client) ReadLink(p string) (string, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{
+ ID: id,
+ Path: p,
+ })
+ if err != nil {
+ return "", err
+ }
+ switch typ {
+ case sshFxpName:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return "", &unexpectedIDErr{id, sid}
+ }
+ count, data := unmarshalUint32(data)
+ if count != 1 {
+ return "", unexpectedCount(1, count)
+ }
+ filename, _ := unmarshalString(data) // ignore dummy attributes
+ return filename, nil
+ case sshFxpStatus:
+ return "", normaliseError(unmarshalStatus(id, data))
+ default:
+ return "", unimplementedPacketErr(typ)
+ }
+}
+
+// Link creates a hard link at 'newname', pointing at the same inode as 'oldname'
+func (c *Client) Link(oldname, newname string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{
+ ID: id,
+ Oldpath: oldname,
+ Newpath: newname,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'
+func (c *Client) Symlink(oldname, newname string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{
+ ID: id,
+ Linkpath: newname,
+ Targetpath: oldname,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{
+ ID: id,
+ Handle: handle,
+ Flags: flags,
+ Attrs: attrs,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// setstat is a convenience wrapper to allow for changing of various parts of the file descriptor.
+func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{
+ ID: id,
+ Path: path,
+ Flags: flags,
+ Attrs: attrs,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// Chtimes changes the access and modification times of the named file.
+func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
+ type times struct {
+ Atime uint32
+ Mtime uint32
+ }
+ attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
+ return c.setstat(path, sshFileXferAttrACmodTime, attrs)
+}
+
+// Chown changes the user and group owners of the named file.
+func (c *Client) Chown(path string, uid, gid int) error {
+ type owner struct {
+ UID uint32
+ GID uint32
+ }
+ attrs := owner{uint32(uid), uint32(gid)}
+ return c.setstat(path, sshFileXferAttrUIDGID, attrs)
+}
+
+// Chmod changes the permissions of the named file.
+//
+// Chmod does not apply a umask, because even retrieving the umask is not
+// possible in a portable way without causing a race condition. Callers
+// should mask off umask bits, if desired.
+func (c *Client) Chmod(path string, mode os.FileMode) error {
+ return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode))
+}
+
+// Truncate sets the size of the named file. Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify what behavior the server should do when setting
+// size greater than the current size.
+func (c *Client) Truncate(path string, size int64) error {
+ return c.setstat(path, sshFileXferAttrSize, uint64(size))
+}
+
+// Open opens the named file for reading. If successful, methods on the
+// returned file can be used for reading; the associated file descriptor
+// has mode O_RDONLY.
+func (c *Client) Open(path string) (*File, error) {
+ return c.open(path, flags(os.O_RDONLY))
+}
+
+// OpenFile is the generalized open call; most users will use Open or
+// Create instead. It opens the named file with specified flag (O_RDONLY
+// etc.). If successful, methods on the returned File can be used for I/O.
+func (c *Client) OpenFile(path string, f int) (*File, error) {
+ return c.open(path, flags(f))
+}
+
+func (c *Client) open(path string, pflags uint32) (*File, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{
+ ID: id,
+ Path: path,
+ Pflags: pflags,
+ })
+ if err != nil {
+ return nil, err
+ }
+ switch typ {
+ case sshFxpHandle:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return nil, &unexpectedIDErr{id, sid}
+ }
+ handle, _ := unmarshalString(data)
+ return &File{c: c, path: path, handle: handle}, nil
+ case sshFxpStatus:
+ return nil, normaliseError(unmarshalStatus(id, data))
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+}
+
+// close closes a handle previously returned in the response
+// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
+// immediately after this request has been sent.
+func (c *Client) close(handle string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{
+ ID: id,
+ Handle: handle,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+func (c *Client) stat(path string) (*FileStat, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return nil, err
+ }
+ switch typ {
+ case sshFxpAttrs:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return nil, &unexpectedIDErr{id, sid}
+ }
+ attr, _ := unmarshalAttrs(data)
+ return attr, nil
+ case sshFxpStatus:
+ return nil, normaliseError(unmarshalStatus(id, data))
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+}
+
+func (c *Client) fstat(handle string) (*FileStat, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{
+ ID: id,
+ Handle: handle,
+ })
+ if err != nil {
+ return nil, err
+ }
+ switch typ {
+ case sshFxpAttrs:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return nil, &unexpectedIDErr{id, sid}
+ }
+ attr, _ := unmarshalAttrs(data)
+ return attr, nil
+ case sshFxpStatus:
+ return nil, normaliseError(unmarshalStatus(id, data))
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+}
+
+// StatVFS retrieves VFS statistics from a remote host.
+//
+// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
+// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
+func (c *Client) StatVFS(path string) (*StatVFS, error) {
+ // send the StatVFS packet to the server
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ switch typ {
+ // server responded with valid data
+ case sshFxpExtendedReply:
+ var response StatVFS
+ err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
+ if err != nil {
+ return nil, errors.New("can not parse reply")
+ }
+
+ return &response, nil
+
+ // the request failed
+ case sshFxpStatus:
+ return nil, normaliseError(unmarshalStatus(id, data))
+
+ default:
+ return nil, unimplementedPacketErr(typ)
+ }
+}
+
+// Join joins any number of path elements into a single path, adding a
+// separating slash if necessary. The result is Cleaned; in particular, all
+// empty strings are ignored.
+func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
+
+// Remove removes the specified file or directory. An error will be returned if no
+// file or directory with the specified path exists, or if the specified directory
+// is not empty.
+func (c *Client) Remove(path string) error {
+ err := c.removeFile(path)
+ // some servers, *cough* osx *cough*, return EPERM, not ENODIR.
+ // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
+ // EPERM is converted to os.ErrPermission so it is not a StatusError
+ if err, ok := err.(*StatusError); ok {
+ switch err.Code {
+ case sshFxFailure, sshFxFileIsADirectory:
+ return c.RemoveDirectory(path)
+ }
+ }
+ if os.IsPermission(err) {
+ return c.RemoveDirectory(path)
+ }
+ return err
+}
+
+func (c *Client) removeFile(path string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{
+ ID: id,
+ Filename: path,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// RemoveDirectory removes a directory path.
+func (c *Client) RemoveDirectory(path string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// Rename renames a file.
+func (c *Client) Rename(oldname, newname string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{
+ ID: id,
+ Oldpath: oldname,
+ Newpath: newname,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// PosixRename renames a file using the posix-rename@openssh.com extension
+// which will replace newname if it already exists.
+func (c *Client) PosixRename(oldname, newname string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{
+ ID: id,
+ Oldpath: oldname,
+ Newpath: newname,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// RealPath can be used to have the server canonicalize any given path name to an absolute path.
+//
+// This is useful for converting path names containing ".." components,
+// or relative pathnames without a leading slash into absolute paths.
+func (c *Client) RealPath(path string) (string, error) {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return "", err
+ }
+ switch typ {
+ case sshFxpName:
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return "", &unexpectedIDErr{id, sid}
+ }
+ count, data := unmarshalUint32(data)
+ if count != 1 {
+ return "", unexpectedCount(1, count)
+ }
+ filename, _ := unmarshalString(data) // ignore attributes
+ return filename, nil
+ case sshFxpStatus:
+ return "", normaliseError(unmarshalStatus(id, data))
+ default:
+ return "", unimplementedPacketErr(typ)
+ }
+}
+
+// Getwd returns the current working directory of the server. Operations
+// involving relative paths will be based at this location.
+func (c *Client) Getwd() (string, error) {
+ return c.RealPath(".")
+}
+
+// Mkdir creates the specified directory. An error will be returned if a file or
+// directory with the specified path already exists, or if the directory's
+// parent folder does not exist (the method cannot create complete paths).
+func (c *Client) Mkdir(path string) error {
+ id := c.nextID()
+ typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{
+ ID: id,
+ Path: path,
+ })
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case sshFxpStatus:
+ return normaliseError(unmarshalStatus(id, data))
+ default:
+ return unimplementedPacketErr(typ)
+ }
+}
+
+// MkdirAll creates a directory named path, along with any necessary parents,
+// and returns nil, or else returns an error.
+// If path is already a directory, MkdirAll does nothing and returns nil.
+// If path contains a regular file, an error is returned
+func (c *Client) MkdirAll(path string) error {
+ // Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := c.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && path[i-1] == '/' { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && path[j-1] != '/' { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = c.MkdirAll(path[0 : j-1])
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = c.Mkdir(path)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := c.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// File represents a remote file.
+type File struct {
+ c *Client
+ path string
+ handle string
+
+ mu sync.Mutex
+ offset int64 // current offset within remote file
+}
+
+// Close closes the File, rendering it unusable for I/O. It returns an
+// error, if any.
+func (f *File) Close() error {
+ return f.c.close(f.handle)
+}
+
+// Name returns the name of the file as presented to Open or Create.
+func (f *File) Name() string {
+ return f.path
+}
+
+// Read reads up to len(b) bytes from the File. It returns the number of bytes
+// read and an error, if any. Read follows io.Reader semantics, so when Read
+// encounters an error or EOF condition after successfully reading n > 0 bytes,
+// it returns the number of bytes read.
+//
+// To maximise throughput for transferring the entire file (especially
+// over high latency links) it is recommended to use WriteTo rather
+// than calling Read multiple times. io.Copy will do this
+// automatically.
+func (f *File) Read(b []byte) (int, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ n, err := f.ReadAt(b, f.offset)
+ f.offset += int64(n)
+ return n, err
+}
+
+// readChunkAt attempts to read the whole entire length of the buffer from the file starting at the offset.
+// It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs.
+func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) {
+ for err == nil && n < len(b) {
+ id := f.c.nextID()
+ typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{
+ ID: id,
+ Handle: f.handle,
+ Offset: uint64(off) + uint64(n),
+ Len: uint32(len(b) - n),
+ })
+ if err != nil {
+ return n, err
+ }
+
+ switch typ {
+ case sshFxpStatus:
+ return n, normaliseError(unmarshalStatus(id, data))
+
+ case sshFxpData:
+ sid, data := unmarshalUint32(data)
+ if id != sid {
+ return n, &unexpectedIDErr{id, sid}
+ }
+
+ l, data := unmarshalUint32(data)
+ n += copy(b[n:], data[:l])
+
+ default:
+ return n, unimplementedPacketErr(typ)
+ }
+ }
+
+ return
+}
+
+func (f *File) readAtSequential(b []byte, off int64) (read int, err error) {
+ for read < len(b) {
+ rb := b[read:]
+ if len(rb) > f.c.maxPacket {
+ rb = rb[:f.c.maxPacket]
+ }
+ n, err := f.readChunkAt(nil, rb, off+int64(read))
+ if n < 0 {
+ panic("sftp.File: returned negative count from readChunkAt")
+ }
+ if n > 0 {
+ read += n
+ }
+ if err != nil {
+ return read, err
+ }
+ }
+ return read, nil
+}
+
+// ReadAt reads up to len(b) byte from the File at a given offset `off`. It returns
+// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics,
+// so the file offset is not altered during the read.
+func (f *File) ReadAt(b []byte, off int64) (int, error) {
+ if len(b) <= f.c.maxPacket {
+ // This should be able to be serviced with 1/2 requests.
+ // So, just do it directly.
+ return f.readChunkAt(nil, b, off)
+ }
+
+ if f.c.disableConcurrentReads {
+ return f.readAtSequential(b, off)
+ }
+
+ // Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests.
+ // This allows writes with a suitably large buffer to transfer data at a much faster rate
+ // by overlapping round trip times.
+
+ cancel := make(chan struct{})
+
+ concurrency := len(b)/f.c.maxPacket + 1
+ if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
+ concurrency = f.c.maxConcurrentRequests
+ }
+
+ resPool := newResChanPool(concurrency)
+
+ type work struct {
+ id uint32
+ res chan result
+
+ b []byte
+ off int64
+ }
+ workCh := make(chan work)
+
+ // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
+ go func() {
+ defer close(workCh)
+
+ b := b
+ offset := off
+ chunkSize := f.c.maxPacket
+
+ for len(b) > 0 {
+ rb := b
+ if len(rb) > chunkSize {
+ rb = rb[:chunkSize]
+ }
+
+ id := f.c.nextID()
+ res := resPool.Get()
+
+ f.c.dispatchRequest(res, &sshFxpReadPacket{
+ ID: id,
+ Handle: f.handle,
+ Offset: uint64(offset),
+ Len: uint32(chunkSize),
+ })
+
+ select {
+ case workCh <- work{id, res, rb, offset}:
+ case <-cancel:
+ return
+ }
+
+ offset += int64(len(rb))
+ b = b[len(rb):]
+ }
+ }()
+
+ type rErr struct {
+ off int64
+ err error
+ }
+ errCh := make(chan rErr)
+
+ var wg sync.WaitGroup
+ wg.Add(concurrency)
+ for i := 0; i < concurrency; i++ {
+ // Map_i: each worker gets work, and then performs the Read into its buffer from its respective offset.
+ go func() {
+ defer wg.Done()
+
+ for packet := range workCh {
+ var n int
+
+ s := <-packet.res
+ resPool.Put(packet.res)
+
+ err := s.err
+ if err == nil {
+ switch s.typ {
+ case sshFxpStatus:
+ err = normaliseError(unmarshalStatus(packet.id, s.data))
+
+ case sshFxpData:
+ sid, data := unmarshalUint32(s.data)
+ if packet.id != sid {
+ err = &unexpectedIDErr{packet.id, sid}
+
+ } else {
+ l, data := unmarshalUint32(data)
+ n = copy(packet.b, data[:l])
+
+ // For normal disk files, it is guaranteed that this will read
+ // the specified number of bytes, or up to end of file.
+ // This implies, if we have a short read, that means EOF.
+ if n < len(packet.b) {
+ err = io.EOF
+ }
+ }
+
+ default:
+ err = unimplementedPacketErr(s.typ)
+ }
+ }
+
+ if err != nil {
+ // return the offset as the start + how much we read before the error.
+ errCh <- rErr{packet.off + int64(n), err}
+ return
+ }
+ }
+ }()
+ }
+
+ // Wait for long tail, before closing results.
+ go func() {
+ wg.Wait()
+ close(errCh)
+ }()
+
+ // Reduce: collect all the results into a relevant return: the earliest offset to return an error.
+ firstErr := rErr{math.MaxInt64, nil}
+ for rErr := range errCh {
+ if rErr.off <= firstErr.off {
+ firstErr = rErr
+ }
+
+ select {
+ case <-cancel:
+ default:
+ // stop any more work from being distributed. (Just in case.)
+ close(cancel)
+ }
+ }
+
+ if firstErr.err != nil {
+ // firstErr.err != nil if and only if firstErr.off > our starting offset.
+ return int(firstErr.off - off), firstErr.err
+ }
+
+ // As per spec for io.ReaderAt, we return nil error if and only if we read everything.
+ return len(b), nil
+}
+
+// writeToSequential implements WriteTo, but works sequentially with no parallelism.
+//
+// It issues one read request at a time (up to maxPacket bytes) at the current
+// f.offset, advances the offset by the amount actually read, and forwards the
+// data to w. An io.EOF from readChunkAt marks a clean end of file and is
+// translated into a nil error, as io.WriterTo requires.
+func (f *File) writeToSequential(w io.Writer) (written int64, err error) {
+	b := make([]byte, f.c.maxPacket)
+	ch := make(chan result, 1) // reusable channel
+
+	for {
+		n, err := f.readChunkAt(ch, b, f.offset)
+		if n < 0 {
+			panic("sftp.File: returned negative count from readChunkAt")
+		}
+
+		if n > 0 {
+			f.offset += int64(n)
+
+			m, err := w.Write(b[:n])
+			written += int64(m)
+
+			if err != nil {
+				return written, err
+			}
+		}
+
+		// Check the read error only after flushing any data that arrived with it.
+		if err != nil {
+			if err == io.EOF {
+				return written, nil // return nil explicitly.
+			}
+
+			return written, err
+		}
+	}
+}
+
+// WriteTo writes the file to the given Writer.
+// The return value is the number of bytes written.
+// Any error encountered during the write is also returned.
+//
+// This method is preferred over calling Read multiple times
+// to maximise throughput for transferring the entire file,
+// especially over high latency links.
+//
+// Internally it runs a Slice/Map/Reduce pipeline: a producer goroutine issues
+// read requests ahead of time, a pool of workers collects the responses, and
+// the Reduce loop below re-serializes them (via per-chunk `next` channels)
+// into in-order writes to w.
+func (f *File) WriteTo(w io.Writer) (written int64, err error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.c.disableConcurrentReads {
+		return f.writeToSequential(w)
+	}
+
+	// For concurrency, we want to guess how many concurrent workers we should use.
+	var fileStat *FileStat
+	if f.c.useFstat {
+		fileStat, err = f.c.fstat(f.handle)
+	} else {
+		fileStat, err = f.c.stat(f.path)
+	}
+	if err != nil {
+		return 0, err
+	}
+
+	fileSize := fileStat.Size
+	if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) {
+		// only regular files are guaranteed to return (full read) xor (partial read, next error)
+		return f.writeToSequential(w)
+	}
+
+	concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess
+	if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 {
+		concurrency64 = uint64(f.c.maxConcurrentRequests)
+	}
+	// Now that concurrency64 is saturated to an int value, we know this assignment cannot possibly overflow.
+	concurrency := int(concurrency64)
+
+	chunkSize := f.c.maxPacket
+	pool := newBufPool(concurrency, chunkSize)
+	resPool := newResChanPool(concurrency)
+
+	cancel := make(chan struct{})
+	var wg sync.WaitGroup
+	defer func() {
+		// Once the writing Reduce phase has ended, all the feed work needs to unconditionally stop.
+		close(cancel)
+
+		// We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed.
+		// Just to be sure we don’t orphan any goroutines or hanging references.
+		wg.Wait()
+	}()
+
+	// writeWork is one chunk of data ready to be written, plus the channel the
+	// following chunk will arrive on (forming a linked list of channels).
+	type writeWork struct {
+		b   []byte
+		off int64
+		err error
+
+		next chan writeWork
+	}
+	writeCh := make(chan writeWork)
+
+	// readWork is one outstanding read request: its id/result channel, its
+	// offset, and the `cur`/`next` write channels that fix its position in
+	// the output ordering.
+	type readWork struct {
+		id  uint32
+		res chan result
+		off int64
+
+		cur, next chan writeWork
+	}
+	readCh := make(chan readWork)
+
+	// Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing.
+	go func() {
+		defer close(readCh)
+
+		off := f.offset
+
+		cur := writeCh
+		for {
+			id := f.c.nextID()
+			res := resPool.Get()
+
+			next := make(chan writeWork)
+			readWork := readWork{
+				id:  id,
+				res: res,
+				off: off,
+
+				cur:  cur,
+				next: next,
+			}
+
+			f.c.dispatchRequest(res, &sshFxpReadPacket{
+				ID:     id,
+				Handle: f.handle,
+				Offset: uint64(off),
+				Len:    uint32(chunkSize),
+			})
+
+			select {
+			case readCh <- readWork:
+			case <-cancel:
+				return
+			}
+
+			off += int64(chunkSize)
+			cur = next
+		}
+	}()
+
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		// Map_i: each worker gets readWork, and does the Read into a buffer at the given offset.
+		go func() {
+			defer wg.Done()
+
+			for readWork := range readCh {
+				var b []byte
+				var n int
+
+				s := <-readWork.res
+				resPool.Put(readWork.res)
+
+				err := s.err
+				if err == nil {
+					switch s.typ {
+					case sshFxpStatus:
+						// A status response to a read is either EOF or a failure.
+						err = normaliseError(unmarshalStatus(readWork.id, s.data))
+
+					case sshFxpData:
+						sid, data := unmarshalUint32(s.data)
+						if readWork.id != sid {
+							err = &unexpectedIDErr{readWork.id, sid}
+
+						} else {
+							l, data := unmarshalUint32(data)
+							b = pool.Get()[:l]
+							n = copy(b, data[:l])
+							b = b[:n]
+						}
+
+					default:
+						err = unimplementedPacketErr(s.typ)
+					}
+				}
+
+				writeWork := writeWork{
+					b:   b,
+					off: readWork.off,
+					err: err,
+
+					next: readWork.next,
+				}
+
+				// Deliver in sequence position `cur`; the Reduce loop will pick
+				// it up when all earlier chunks have been written.
+				select {
+				case readWork.cur <- writeWork:
+				case <-cancel:
+					return
+				}
+
+				if err != nil {
+					return
+				}
+			}
+		}()
+	}
+
+	// Reduce: serialize the results from the reads into sequential writes.
+	cur := writeCh
+	for {
+		packet, ok := <-cur
+		if !ok {
+			return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel")
+		}
+
+		// Because writes are serialized, this will always be the last successfully read byte.
+		f.offset = packet.off + int64(len(packet.b))
+
+		if len(packet.b) > 0 {
+			n, err := w.Write(packet.b)
+			written += int64(n)
+			if err != nil {
+				return written, err
+			}
+		}
+
+		if packet.err != nil {
+			if packet.err == io.EOF {
+				return written, nil
+			}
+
+			return written, packet.err
+		}
+
+		pool.Put(packet.b)
+		cur = packet.next
+	}
+}
+
+// Stat returns the FileInfo structure describing file.
+// It stats by open handle (SSH_FXP_FSTAT), not by path.
+// Any error from the server round-trip is returned unchanged.
+func (f *File) Stat() (os.FileInfo, error) {
+	fs, err := f.c.fstat(f.handle)
+	if err != nil {
+		return nil, err
+	}
+	return fileInfoFromStat(fs, path.Base(f.path)), nil
+}
+
+// Write writes len(b) bytes to the File. It returns the number of bytes
+// written and an error, if any. Write returns a non-nil error when n !=
+// len(b).
+//
+// To maximise throughput for transferring the entire file (especially
+// over high latency links) it is recommended to use ReadFrom rather
+// than calling Write multiple times. io.Copy will do this
+// automatically.
+func (f *File) Write(b []byte) (int, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	// Delegate to WriteAt at the current offset, then advance the offset
+	// by however much was actually written (even on partial failure).
+	n, err := f.WriteAt(b, f.offset)
+	f.offset += int64(n)
+	return n, err
+}
+
+// writeChunkAt sends a single SSH_FXP_WRITE for b at the given offset and
+// waits for the status reply. It returns len(b) on success, or 0 and the
+// error otherwise — the protocol gives no partial-write count.
+// ch, if non-nil with capacity >= 1, is reused as the response channel.
+func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) {
+	typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{
+		ID:     f.c.nextID(),
+		Handle: f.handle,
+		Offset: uint64(off),
+		Length: uint32(len(b)),
+		Data:   b,
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	switch typ {
+	case sshFxpStatus:
+		id, _ := unmarshalUint32(data)
+		err := normaliseError(unmarshalStatus(id, data))
+		if err != nil {
+			return 0, err
+		}
+
+	default:
+		return 0, unimplementedPacketErr(typ)
+	}
+
+	return len(b), nil
+}
+
+// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially.
+//
+// On any failure it reports the earliest failing offset, so the returned
+// count is the length of the longest contiguous prefix known to be written;
+// later chunks may or may not have reached the server.
+func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) {
+	// Split the write into multiple maxPacket sized concurrent writes
+	// bounded by maxConcurrentRequests. This allows writes with a suitably
+	// large buffer to transfer data at a much faster rate due to
+	// overlapping round trip times.
+
+	cancel := make(chan struct{})
+
+	type work struct {
+		id  uint32
+		res chan result
+
+		off int64
+	}
+	workCh := make(chan work)
+
+	concurrency := len(b)/f.c.maxPacket + 1
+	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
+		concurrency = f.c.maxConcurrentRequests
+	}
+
+	pool := newResChanPool(concurrency)
+
+	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
+	go func() {
+		defer close(workCh)
+
+		var read int
+		chunkSize := f.c.maxPacket
+
+		for read < len(b) {
+			wb := b[read:]
+			if len(wb) > chunkSize {
+				wb = wb[:chunkSize]
+			}
+
+			id := f.c.nextID()
+			res := pool.Get()
+			off := off + int64(read)
+
+			// The request is dispatched here; workCh only carries the
+			// bookkeeping needed to collect its response.
+			f.c.dispatchRequest(res, &sshFxpWritePacket{
+				ID:     id,
+				Handle: f.handle,
+				Offset: uint64(off),
+				Length: uint32(len(wb)),
+				Data:   wb,
+			})
+
+			select {
+			case workCh <- work{id, res, off}:
+			case <-cancel:
+				return
+			}
+
+			read += len(wb)
+		}
+	}()
+
+	type wErr struct {
+		off int64
+		err error
+	}
+	errCh := make(chan wErr)
+
+	var wg sync.WaitGroup
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
+		go func() {
+			defer wg.Done()
+
+			for work := range workCh {
+				s := <-work.res
+				pool.Put(work.res)
+
+				err := s.err
+				if err == nil {
+					switch s.typ {
+					case sshFxpStatus:
+						err = normaliseError(unmarshalStatus(work.id, s.data))
+					default:
+						err = unimplementedPacketErr(s.typ)
+					}
+				}
+
+				if err != nil {
+					errCh <- wErr{work.off, err}
+				}
+			}
+		}()
+	}
+
+	// Wait for long tail, before closing results.
+	go func() {
+		wg.Wait()
+		close(errCh)
+	}()
+
+	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
+	firstErr := wErr{math.MaxInt64, nil}
+	for wErr := range errCh {
+		if wErr.off <= firstErr.off {
+			firstErr = wErr
+		}
+
+		select {
+		case <-cancel:
+		default:
+			// stop any more work from being distributed. (Just in case.)
+			close(cancel)
+		}
+	}
+
+	if firstErr.err != nil {
+		// firstErr.err != nil if and only if firstErr.off >= our starting offset.
+		return int(firstErr.off - off), firstErr.err
+	}
+
+	return len(b), nil
+}
+
+// WriteAt writes up to len(b) byte to the File at a given offset `off`. It returns
+// the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics,
+// so the file offset is not altered during the write.
+func (f *File) WriteAt(b []byte, off int64) (written int, err error) {
+	if len(b) <= f.c.maxPacket {
+		// We can do this in one write.
+		return f.writeChunkAt(nil, b, off)
+	}
+
+	if f.c.useConcurrentWrites {
+		return f.writeAtConcurrent(b, off)
+	}
+
+	// Sequential fallback: one chunked request at a time, stopping at the
+	// first error so `written` is an exact contiguous count.
+	ch := make(chan result, 1) // reusable channel
+
+	chunkSize := f.c.maxPacket
+
+	for written < len(b) {
+		wb := b[written:]
+		if len(wb) > chunkSize {
+			wb = wb[:chunkSize]
+		}
+
+		n, err := f.writeChunkAt(ch, wb, off+int64(written))
+		if n > 0 {
+			written += n
+		}
+
+		if err != nil {
+			return written, err
+		}
+	}
+
+	return len(b), nil
+}
+
+// ReadFromWithConcurrency implements ReaderFrom,
+// but uses the given concurrency to issue multiple requests at the same time.
+//
+// Giving a concurrency of less than one will default to the Client’s max concurrency.
+//
+// Otherwise, the given concurrency will be capped by the Client's max concurrency.
+func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) {
+	// Split the write into multiple maxPacket sized concurrent writes.
+	// This allows writes with a suitably large reader
+	// to transfer data at a much faster rate due to overlapping round trip times.
+
+	cancel := make(chan struct{})
+
+	type work struct {
+		id  uint32
+		res chan result
+
+		off int64
+	}
+	workCh := make(chan work)
+
+	type rwErr struct {
+		off int64
+		err error
+	}
+	errCh := make(chan rwErr)
+
+	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
+		concurrency = f.c.maxConcurrentRequests
+	}
+
+	pool := newResChanPool(concurrency)
+
+	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
+	go func() {
+		defer close(workCh)
+
+		// NOTE(review): b is a single buffer reused across iterations, and it is
+		// handed to dispatchRequest as packet Data. This is safe only if
+		// dispatchRequest fully serializes the packet before returning —
+		// confirm against conn.go before touching this loop.
+		b := make([]byte, f.c.maxPacket)
+		off := f.offset
+
+		for {
+			n, err := r.Read(b)
+
+			if n > 0 {
+				read += int64(n)
+
+				id := f.c.nextID()
+				res := pool.Get()
+
+				f.c.dispatchRequest(res, &sshFxpWritePacket{
+					ID:     id,
+					Handle: f.handle,
+					Offset: uint64(off),
+					Length: uint32(n),
+					Data:   b,
+				})
+
+				select {
+				case workCh <- work{id, res, off}:
+				case <-cancel:
+					return
+				}
+
+				off += int64(n)
+			}
+
+			if err != nil {
+				// io.EOF is a clean end of input, not a failure.
+				if err != io.EOF {
+					errCh <- rwErr{off, err}
+				}
+				return
+			}
+		}
+	}()
+
+	var wg sync.WaitGroup
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
+		go func() {
+			defer wg.Done()
+
+			for work := range workCh {
+				s := <-work.res
+				pool.Put(work.res)
+
+				err := s.err
+				if err == nil {
+					switch s.typ {
+					case sshFxpStatus:
+						err = normaliseError(unmarshalStatus(work.id, s.data))
+					default:
+						err = unimplementedPacketErr(s.typ)
+					}
+				}
+
+				if err != nil {
+					errCh <- rwErr{work.off, err}
+				}
+			}
+		}()
+	}
+
+	// Wait for long tail, before closing results.
+	go func() {
+		wg.Wait()
+		close(errCh)
+	}()
+
+	// Reduce: Collect all the results into a relevant return: the earliest offset to return an error.
+	firstErr := rwErr{math.MaxInt64, nil}
+	for rwErr := range errCh {
+		if rwErr.off <= firstErr.off {
+			firstErr = rwErr
+		}
+
+		select {
+		case <-cancel:
+		default:
+			// stop any more work from being distributed.
+			close(cancel)
+		}
+	}
+
+	if firstErr.err != nil {
+		// firstErr.err != nil if and only if firstErr.off is a valid offset.
+		//
+		// firstErr.off will then be the lesser of:
+		// * the offset of the first error from writing,
+		// * the last successfully read offset.
+		//
+		// This could be less than the last successfully written offset,
+		// which is the whole reason for the UseConcurrentWrites() ClientOption.
+		//
+		// Callers are responsible for truncating any SFTP files to a safe length.
+		f.offset = firstErr.off
+
+		// ReadFrom is defined to return the read bytes, regardless of any writer errors.
+		return read, firstErr.err
+	}
+
+	f.offset += read
+	return read, nil
+}
+
+// ReadFrom reads data from r until EOF and writes it to the file. The return
+// value is the number of bytes read. Any error except io.EOF encountered
+// during the read is also returned.
+//
+// This method is preferred over calling Write multiple times
+// to maximise throughput for transferring the entire file,
+// especially over high-latency links.
+func (f *File) ReadFrom(r io.Reader) (int64, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.c.useConcurrentWrites {
+		// Try to discover how much data is coming so we can size concurrency.
+		// A reader matching none of these cases leaves remain == 0, which
+		// falls through to the sequential path below.
+		var remain int64
+		switch r := r.(type) {
+		case interface{ Len() int }:
+			remain = int64(r.Len())
+
+		case interface{ Size() int64 }:
+			remain = r.Size()
+
+		case *io.LimitedReader:
+			remain = r.N
+
+		case interface{ Stat() (os.FileInfo, error) }:
+			info, err := r.Stat()
+			if err == nil {
+				remain = info.Size()
+			}
+		}
+
+		if remain < 0 {
+			// We can strongly assert that we want default max concurrency here.
+			return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests)
+		}
+
+		if remain > int64(f.c.maxPacket) {
+			// Otherwise, only use concurrency, if it would be at least two packets.
+
+			// This is the best reasonable guess we can make.
+			concurrency64 := remain/int64(f.c.maxPacket) + 1
+
+			// We need to cap this value to an `int` size value to avoid overflow on 32-bit machines.
+			// So, we may as well pre-cap it to `f.c.maxConcurrentRequests`.
+			if concurrency64 > int64(f.c.maxConcurrentRequests) {
+				concurrency64 = int64(f.c.maxConcurrentRequests)
+			}
+
+			return f.ReadFromWithConcurrency(r, int(concurrency64))
+		}
+	}
+
+	// Sequential path: read into one maxPacket buffer and write each chunk
+	// in turn, keeping f.offset in sync with confirmed writes.
+	ch := make(chan result, 1) // reusable channel
+
+	b := make([]byte, f.c.maxPacket)
+
+	var read int64
+	for {
+		n, err := r.Read(b)
+		if n < 0 {
+			panic("sftp.File: reader returned negative count from Read")
+		}
+
+		if n > 0 {
+			read += int64(n)
+
+			m, err2 := f.writeChunkAt(ch, b[:n], f.offset)
+			f.offset += int64(m)
+
+			// A write error takes precedence only if the read succeeded.
+			if err == nil {
+				err = err2
+			}
+		}
+
+		if err != nil {
+			if err == io.EOF {
+				return read, nil // return nil explicitly.
+			}
+
+			return read, err
+		}
+	}
+}
+
+// Seek implements io.Seeker by setting the client offset for the next Read or
+// Write. It returns the next offset read. Seeking before or after the end of
+// the file is undefined. Seeking relative to the end calls Stat.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	switch whence {
+	case io.SeekStart:
+		// offset is already absolute.
+	case io.SeekCurrent:
+		offset += f.offset
+	case io.SeekEnd:
+		fi, err := f.Stat()
+		if err != nil {
+			return f.offset, err
+		}
+		offset += fi.Size()
+	default:
+		return f.offset, unimplementedSeekWhence(whence)
+	}
+
+	// Negative resolved offsets are invalid; leave f.offset untouched.
+	if offset < 0 {
+		return f.offset, os.ErrInvalid
+	}
+
+	f.offset = offset
+	return f.offset, nil
+}
+
+// Chown changes the uid/gid of the current file.
+//
+// Note: this operates on f.path rather than the open handle, so a concurrent
+// rename of the file on the server may redirect it.
+func (f *File) Chown(uid, gid int) error {
+	return f.c.Chown(f.path, uid, gid)
+}
+
+// Chmod changes the permissions of the current file.
+//
+// Unlike Chown, this uses the open handle (SSH_FXP_FSETSTAT).
+//
+// See Client.Chmod for details.
+func (f *File) Chmod(mode os.FileMode) error {
+	return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode))
+}
+
+// Sync requests a flush of the contents of a File to stable storage.
+//
+// Sync requires the server to support the fsync@openssh.com extension.
+// Servers without it will answer the request with an error status,
+// which is returned here after normalisation.
+func (f *File) Sync() error {
+	id := f.c.nextID()
+	typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{
+		ID:     id,
+		Handle: f.handle,
+	})
+
+	switch {
+	case err != nil:
+		return err
+	case typ == sshFxpStatus:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return &unexpectedPacketErr{want: sshFxpStatus, got: typ}
+	}
+}
+
+// Truncate sets the size of the current file. Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify what behavior the server should do when setting
+// size greater than the current size.
+// We send a SSH_FXP_FSETSTAT here since we have a file handle
+func (f *File) Truncate(size int64) error {
+	return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size))
+}
+
+// normaliseError normalises an error into a more standard form that can be
+// checked against stdlib errors like io.EOF or os.ErrNotExist.
+//
+// Only *StatusError values are translated; every other error (including nil)
+// is passed through unchanged. An SSH_FX_OK status collapses to nil.
+func normaliseError(err error) error {
+	switch err := err.(type) {
+	case *StatusError:
+		switch err.Code {
+		case sshFxEOF:
+			return io.EOF
+		case sshFxNoSuchFile:
+			return os.ErrNotExist
+		case sshFxPermissionDenied:
+			return os.ErrPermission
+		case sshFxOk:
+			return nil
+		default:
+			return err
+		}
+	default:
+		return err
+	}
+}
+
+// flags converts the flags passed to OpenFile into ssh flags.
+// Unsupported flags are ignored.
+func flags(f int) uint32 {
+	var out uint32
+	// os.O_RDONLY is 0, so masking with os.O_WRONLY distinguishes
+	// write-only from the read-only default.
+	switch f & os.O_WRONLY {
+	case os.O_WRONLY:
+		out |= sshFxfWrite
+	case os.O_RDONLY:
+		out |= sshFxfRead
+	}
+	// O_RDWR overrides the read/write split above with both bits.
+	if f&os.O_RDWR == os.O_RDWR {
+		out |= sshFxfRead | sshFxfWrite
+	}
+	if f&os.O_APPEND == os.O_APPEND {
+		out |= sshFxfAppend
+	}
+	if f&os.O_CREATE == os.O_CREATE {
+		out |= sshFxfCreat
+	}
+	if f&os.O_TRUNC == os.O_TRUNC {
+		out |= sshFxfTrunc
+	}
+	if f&os.O_EXCL == os.O_EXCL {
+		out |= sshFxfExcl
+	}
+	return out
+}
+
+// toChmodPerm converts Go permission bits to POSIX permission bits.
+//
+// This differs from fromFileMode in that we preserve the POSIX versions of
+// setuid, setgid and sticky in m, because we've historically supported those
+// bits, and we mask off any non-permission bits.
+//
+// s_ISUID, s_ISGID and s_ISVTX are the POSIX bit constants declared
+// elsewhere in this package.
+func toChmodPerm(m os.FileMode) (perm uint32) {
+	const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX
+	perm = uint32(m & mask)
+
+	// Also translate the Go-style high bits into their POSIX positions.
+	if m&os.ModeSetuid != 0 {
+		perm |= s_ISUID
+	}
+	if m&os.ModeSetgid != 0 {
+		perm |= s_ISGID
+	}
+	if m&os.ModeSticky != 0 {
+		perm |= s_ISVTX
+	}
+
+	return perm
+}
diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go
new file mode 100644
index 000000000..7d9514237
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/conn.go
@@ -0,0 +1,189 @@
+package sftp
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// conn implements a bidirectional channel on which client and server
+// connections are multiplexed.
+//
+// Reads are not locked: only the single recv loop reads. Writes go through
+// the embedded Mutex so concurrent sendPacket calls cannot interleave bytes.
+type conn struct {
+	io.Reader
+	io.WriteCloser
+	// this is the same allocator used in packet manager
+	alloc *allocator
+	sync.Mutex // used to serialise writes to sendPacket
+}
+
+// recvPacket reads the next packet from the connection.
+// The orderID is used in server mode if the allocator is enabled.
+// For the client mode just pass 0.
+func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) {
+	return recvPacket(c, c.alloc, orderID)
+}
+
+// sendPacket marshals m and writes it to the connection,
+// holding the write lock so packets are never interleaved.
+func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
+	c.Lock()
+	defer c.Unlock()
+
+	return sendPacket(c, m)
+}
+
+// Close closes the write side of the connection.
+// Taking the write lock first ensures no sendPacket is mid-write.
+func (c *conn) Close() error {
+	c.Lock()
+	defer c.Unlock()
+	return c.WriteCloser.Close()
+}
+
+// clientConn is the client side of a conn: it tracks in-flight requests by
+// request id and routes each response to the channel that is waiting for it.
+type clientConn struct {
+	conn
+	wg sync.WaitGroup // tracks the recv loop goroutine
+
+	sync.Mutex                          // protects inflight
+	inflight   map[uint32]chan<- result // outstanding requests
+
+	closed chan struct{} // closed by broadcastErr on fatal error
+	err    error         // the fatal error, valid once closed is closed
+}
+
+// Wait blocks until the conn has shut down, and return the error
+// causing the shutdown. It can be called concurrently from multiple
+// goroutines.
+func (c *clientConn) Wait() error {
+	<-c.closed
+	return c.err
+}
+
+// Close closes the SFTP session.
+// It closes the underlying conn first, then waits for the recv loop to exit.
+func (c *clientConn) Close() error {
+	defer c.wg.Wait()
+	return c.conn.Close()
+}
+
+// loop runs the receive loop and, on failure, broadcasts the error to every
+// goroutine still waiting for a response.
+func (c *clientConn) loop() {
+	defer c.wg.Done()
+	err := c.recv()
+	if err != nil {
+		c.broadcastErr(err)
+	}
+}
+
+// recv continuously reads from the server and forwards responses to the
+// appropriate channel.
+//
+// It returns (and closes the conn) on the first transport or framing error,
+// or when a response arrives whose request id has no waiting channel.
+func (c *clientConn) recv() error {
+	defer c.conn.Close()
+
+	for {
+		typ, data, err := c.recvPacket(0)
+		if err != nil {
+			return err
+		}
+		// Every response starts with the uint32 request id it answers.
+		sid, _, err := unmarshalUint32Safe(data)
+		if err != nil {
+			return err
+		}
+
+		ch, ok := c.getChannel(sid)
+		if !ok {
+			// This is an unexpected occurrence. Send the error
+			// back to all listeners so that they terminate
+			// gracefully.
+			return fmt.Errorf("sid not found: %d", sid)
+		}
+
+		// Safe to send without select: getChannel removed the entry, so this
+		// channel receives exactly one result, and callers size it with cap >= 1.
+		ch <- result{typ: typ, data: data}
+	}
+}
+
+// putChannel registers ch as the receiver for the response with id sid.
+// It reports false — after delivering ErrSSHFxConnectionLost on ch — if the
+// connection has already shut down, so the caller must not send the request.
+func (c *clientConn) putChannel(ch chan<- result, sid uint32) bool {
+	c.Lock()
+	defer c.Unlock()
+
+	select {
+	case <-c.closed:
+		// already closed with broadcastErr, return error on chan.
+		ch <- result{err: ErrSSHFxConnectionLost}
+		return false
+	default:
+	}
+
+	c.inflight[sid] = ch
+	return true
+}
+
+// getChannel removes and returns the channel registered for sid.
+// Removing under the lock guarantees at most one sender per channel.
+func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) {
+	c.Lock()
+	defer c.Unlock()
+
+	ch, ok := c.inflight[sid]
+	delete(c.inflight, sid)
+
+	return ch, ok
+}
+
+// result captures the result of receiving a packet from the server:
+// the packet type and payload on success, or a transport error.
+type result struct {
+	typ  byte
+	data []byte
+	err  error
+}
+
+// idmarshaler is a request packet: it exposes its request id
+// and can marshal itself to the wire format.
+type idmarshaler interface {
+	id() uint32
+	encoding.BinaryMarshaler
+}
+
+// sendPacket dispatches p and blocks for its single response.
+// A caller-supplied ch with capacity >= 1 is reused; otherwise a fresh
+// buffered channel is made so the recv loop never blocks delivering.
+func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) {
+	if cap(ch) < 1 {
+		ch = make(chan result, 1)
+	}
+
+	c.dispatchRequest(ch, p)
+	s := <-ch
+	return s.typ, s.data, s.err
+}
+
+// dispatchRequest registers ch for p's request id and sends p on the wire.
+// Exactly one result is always delivered on ch: the response, a send error,
+// or ErrSSHFxConnectionLost (via putChannel) when already shut down.
+//
+// dispatchRequest should ideally only be called by race-detection tests outside of this file,
+// where you have to ensure two packets are in flight sequentially after each other.
+func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
+	sid := p.id()
+
+	if !c.putChannel(ch, sid) {
+		// already closed.
+		return
+	}
+
+	if err := c.conn.sendPacket(p); err != nil {
+		// Reclaim the channel (the recv loop may have raced us for it)
+		// and deliver the send failure ourselves.
+		if ch, ok := c.getChannel(sid); ok {
+			ch <- result{err: err}
+		}
+	}
+}
+
+// broadcastErr sends an error to all goroutines waiting for a response,
+// records err as the connection's terminal error, and closes c.closed so
+// that Wait returns and putChannel rejects new requests.
+func (c *clientConn) broadcastErr(err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	bcastRes := result{err: ErrSSHFxConnectionLost}
+	for sid, ch := range c.inflight {
+		ch <- bcastRes
+
+		// Replace the chan in inflight,
+		// we have hijacked this chan,
+		// and this guarantees always-only-once sending.
+		c.inflight[sid] = make(chan<- result, 1)
+	}
+
+	c.err = err
+	close(c.closed)
+}
+
+// serverConn is the server side of a conn.
+type serverConn struct {
+	conn
+}
+
+// sendError replies to request id with a status packet built from err.
+func (s *serverConn) sendError(id uint32, err error) error {
+	return s.sendPacket(statusFromError(id, err))
+}
diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go
new file mode 100644
index 000000000..3e264abe3
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/debug.go
@@ -0,0 +1,9 @@
+// +build debug
+
+package sftp
+
+import "log"
+
+// debug logs a formatted message; this implementation is only compiled in
+// when the `debug` build tag is set.
+func debug(fmt string, args ...interface{}) {
+	log.Printf(fmt, args...)
+}
diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go
new file mode 100644
index 000000000..169aebc28
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package sftp
+
+import "bytes"
+
+// sinkfuzz is a write-only sink used as the outgoing stream during fuzzing.
+type sinkfuzz struct{}
+
+func (*sinkfuzz) Close() error                { return nil }
+func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil }
+
+var devnull = &sinkfuzz{}
+
+// Fuzz feeds arbitrary bytes to the client packet decoder.
+// Return 1 marks input that reached a full session setup, 0 otherwise.
+// To run: go-fuzz-build && go-fuzz
+func Fuzz(data []byte) int {
+	c, err := NewClientPipe(bytes.NewReader(data), devnull)
+	if err != nil {
+		return 0
+	}
+	c.Close()
+	return 1
+}
diff --git a/vendor/github.com/pkg/sftp/go.mod b/vendor/github.com/pkg/sftp/go.mod
new file mode 100644
index 000000000..b0347871f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/go.mod
@@ -0,0 +1,10 @@
+module github.com/pkg/sftp
+
+go 1.15
+
+require (
+ github.com/kr/fs v0.1.0
+ github.com/stretchr/testify v1.7.0
+ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+)
diff --git a/vendor/github.com/pkg/sftp/go.sum b/vendor/github.com/pkg/sftp/go.sum
new file mode 100644
index 000000000..2b66d87e3
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/go.sum
@@ -0,0 +1,25 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
new file mode 100644
index 000000000..eed61bfc6
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
@@ -0,0 +1,325 @@
+package filexfer
+
+// Attributes related flags.
+const (
+ AttrSize = 1 << iota // SSH_FILEXFER_ATTR_SIZE
+ AttrUIDGID // SSH_FILEXFER_ATTR_UIDGID
+ AttrPermissions // SSH_FILEXFER_ATTR_PERMISSIONS
+ AttrACModTime // SSH_FILEXFER_ACMODTIME
+
+ AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED
+)
+
+// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02
+//
+// A field is only meaningful when the corresponding bit is set in Flags;
+// use the Get*/Set* accessors rather than reading fields directly.
+//
+// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
+type Attributes struct {
+	Flags uint32
+
+	// AttrSize
+	Size uint64
+
+	// AttrUIDGID
+	UID uint32
+	GID uint32
+
+	// AttrPermissions
+	Permissions FileMode
+
+	// AttrACmodTime
+	ATime uint32
+	MTime uint32
+
+	// AttrExtended
+	ExtendedAttributes []ExtendedAttribute
+}
+
+// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined.
+func (a *Attributes) GetSize() (size uint64, ok bool) {
+ return a.Size, a.Flags&AttrSize != 0
+}
+
+// SetSize is a convenience function that sets the Size field,
+// and marks the field as valid/defined in Flags.
+func (a *Attributes) SetSize(size uint64) {
+ a.Flags |= AttrSize
+ a.Size = size
+}
+
+// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined.
+func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) {
+ return a.UID, a.GID, a.Flags&AttrUIDGID != 0
+}
+
+// SetUIDGID is a convenience function that sets the UID and GID fields,
+// and marks the fields as valid/defined in Flags.
+func (a *Attributes) SetUIDGID(uid, gid uint32) {
+ a.Flags |= AttrUIDGID
+ a.UID = uid
+ a.GID = gid
+}
+
+// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined.
+func (a *Attributes) GetPermissions() (perms FileMode, ok bool) {
+ return a.Permissions, a.Flags&AttrPermissions != 0
+}
+
+// SetPermissions is a convenience function that sets the Permissions field,
+// and marks the field as valid/defined in Flags.
+func (a *Attributes) SetPermissions(perms FileMode) {
+ a.Flags |= AttrPermissions
+ a.Permissions = perms
+}
+
+// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined.
+func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) {
+ return a.ATime, a.MTime, a.Flags&AttrACModTime != 0
+}
+
+// SetACModTime is a convenience function that sets the ATime and MTime fields,
+// and marks the fields as valid/defined in Flags.
+func (a *Attributes) SetACModTime(atime, mtime uint32) {
+ a.Flags |= AttrACModTime
+ a.ATime = atime
+ a.MTime = mtime
+}
+
+// Len returns the number of bytes a would marshal into.
+func (a *Attributes) Len() int {
+ length := 4
+
+ if a.Flags&AttrSize != 0 {
+ length += 8
+ }
+
+ if a.Flags&AttrUIDGID != 0 {
+ length += 4 + 4
+ }
+
+ if a.Flags&AttrPermissions != 0 {
+ length += 4
+ }
+
+ if a.Flags&AttrACModTime != 0 {
+ length += 4 + 4
+ }
+
+ if a.Flags&AttrExtended != 0 {
+ length += 4
+
+ for _, ext := range a.ExtendedAttributes {
+ length += ext.Len()
+ }
+ }
+
+ return length
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (a *Attributes) MarshalInto(b *Buffer) {
+ b.AppendUint32(a.Flags)
+
+ if a.Flags&AttrSize != 0 {
+ b.AppendUint64(a.Size)
+ }
+
+ if a.Flags&AttrUIDGID != 0 {
+ b.AppendUint32(a.UID)
+ b.AppendUint32(a.GID)
+ }
+
+ if a.Flags&AttrPermissions != 0 {
+ b.AppendUint32(uint32(a.Permissions))
+ }
+
+ if a.Flags&AttrACModTime != 0 {
+ b.AppendUint32(a.ATime)
+ b.AppendUint32(a.MTime)
+ }
+
+ if a.Flags&AttrExtended != 0 {
+ b.AppendUint32(uint32(len(a.ExtendedAttributes)))
+
+ for _, ext := range a.ExtendedAttributes {
+ ext.MarshalInto(b)
+ }
+ }
+}
+
+// MarshalBinary returns a as the binary encoding of a.
+func (a *Attributes) MarshalBinary() ([]byte, error) {
+ buf := NewBuffer(make([]byte, 0, a.Len()))
+ a.MarshalInto(buf)
+ return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an Attributes from the given Buffer into e.
+//
+// NOTE: The values of fields not covered in the a.Flags are explicitly undefined.
+func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) {
+ flags, err := b.ConsumeUint32()
+ if err != nil {
+ return err
+ }
+
+ return a.XXX_UnmarshalByFlags(flags, b)
+}
+
+// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode.
+// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp.
+// This function is not a part of any compatibility promise.
+func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) {
+	a.Flags = flags
+
+	// Short-circuit dummy attributes.
+	if a.Flags == 0 {
+		return nil
+	}
+
+	if a.Flags&AttrSize != 0 {
+		if a.Size, err = b.ConsumeUint64(); err != nil {
+			return err
+		}
+	}
+
+	if a.Flags&AttrUIDGID != 0 {
+		if a.UID, err = b.ConsumeUint32(); err != nil {
+			return err
+		}
+
+		if a.GID, err = b.ConsumeUint32(); err != nil {
+			return err
+		}
+	}
+
+	if a.Flags&AttrPermissions != 0 {
+		m, err := b.ConsumeUint32()
+		if err != nil {
+			return err
+		}
+
+		a.Permissions = FileMode(m)
+	}
+
+	if a.Flags&AttrACModTime != 0 {
+		if a.ATime, err = b.ConsumeUint32(); err != nil {
+			return err
+		}
+
+		if a.MTime, err = b.ConsumeUint32(); err != nil {
+			return err
+		}
+	}
+
+	if a.Flags&AttrExtended != 0 {
+		count, err := b.ConsumeUint32()
+		if err != nil {
+			return err
+		}
+
+		// NOTE(review): count comes from the wire, so a hostile peer controls
+		// this allocation size — confirm upstream whether a bound is applied
+		// before vendoring changes here.
+		a.ExtendedAttributes = make([]ExtendedAttribute, count)
+		for i := range a.ExtendedAttributes {
+			// NOTE(review): the error from UnmarshalFrom is discarded here;
+			// a truncated extended attribute is silently zero-filled.
+			a.ExtendedAttributes[i].UnmarshalFrom(b)
+		}
+	}
+
+	return nil
+}
+
+// UnmarshalBinary decodes the binary encoding of Attributes into e.
+func (a *Attributes) UnmarshalBinary(data []byte) error {
+ return a.UnmarshalFrom(NewBuffer(data))
+}
+
+// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02
+//
+// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
+type ExtendedAttribute struct {
+ Type string
+ Data string
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *ExtendedAttribute) Len() int {
+ return 4 + len(e.Type) + 4 + len(e.Data)
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *ExtendedAttribute) MarshalInto(b *Buffer) {
+ b.AppendString(e.Type)
+ b.AppendString(e.Data)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
+func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) {
+ buf := NewBuffer(make([]byte, 0, e.Len()))
+ e.MarshalInto(buf)
+ return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an ExtendedAattribute from the given Buffer into e.
+func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) {
+ if e.Type, err = b.ConsumeString(); err != nil {
+ return err
+ }
+
+ if e.Data, err = b.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e.
+func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error {
+ return e.UnmarshalFrom(NewBuffer(data))
+}
+
+// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02
+//
+// This type is incompatible with versions 4 or higher.
+type NameEntry struct {
+ Filename string
+ Longname string
+ Attrs Attributes
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *NameEntry) Len() int {
+ return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len()
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *NameEntry) MarshalInto(b *Buffer) {
+ b.AppendString(e.Filename)
+ b.AppendString(e.Longname)
+
+ e.Attrs.MarshalInto(b)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
+func (e *NameEntry) MarshalBinary() ([]byte, error) {
+ buf := NewBuffer(make([]byte, 0, e.Len()))
+ e.MarshalInto(buf)
+ return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an NameEntry from the given Buffer into e.
+//
+// NOTE: The values of fields not covered in the a.Flags are explicitly undefined.
+func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) {
+ if e.Filename, err = b.ConsumeString(); err != nil {
+ return err
+ }
+
+ if e.Longname, err = b.ConsumeString(); err != nil {
+ return err
+ }
+
+ return e.Attrs.UnmarshalFrom(b)
+}
+
+// UnmarshalBinary decodes the binary encoding of NameEntry into e.
+func (e *NameEntry) UnmarshalBinary(data []byte) error {
+ return e.UnmarshalFrom(NewBuffer(data))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
new file mode 100644
index 000000000..a6086036e
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
@@ -0,0 +1,293 @@
+package filexfer
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// Various encoding errors.
+var (
+ ErrShortPacket = errors.New("packet too short")
+ ErrLongPacket = errors.New("packet too long")
+)
+
+// Buffer wraps up the various encoding details of the SSH format.
+//
+// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8
+type Buffer struct {
+ b []byte
+ off int
+}
+
+// NewBuffer creates and initializes a new buffer using buf as its initial contents.
+// The new buffer takes ownership of buf, and the caller should not use buf after this call.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{
+ b: buf,
+ }
+}
+
+// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into.
+// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes.
+func NewMarshalBuffer(size int) *Buffer {
+ return NewBuffer(make([]byte, 4+1+4+size))
+}
+
+// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer.
+// The slice is valid for use only until the next buffer modification
+// (that is, only until the next call to an Append or Consume method).
+func (b *Buffer) Bytes() []byte {
+ return b.b[b.off:]
+}
+
+// Len returns the number of unconsumed bytes in the buffer.
+func (b *Buffer) Len() int { return len(b.b) - b.off }
+
+// Cap returns the capacity of the buffer’s underlying byte slice,
+// that is, the total space allocated for the buffer’s data.
+func (b *Buffer) Cap() int { return cap(b.b) }
+
+// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends.
+func (b *Buffer) Reset() {
+ b.b = b.b[:0]
+ b.off = 0
+}
+
+// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into.
+// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID.
+func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) {
+ b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0
+
+ b.AppendUint8(uint8(packetType))
+ b.AppendUint32(requestID)
+}
+
+// Packet finalizes the packet started from StartPacket.
+// It is expected that this will end the ownership of the underlying byte-slice,
+// and so the returned byte-slices may be reused the same as any other byte-slice,
+// the caller should not use this buffer after this call.
+//
+// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian).
+// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload.
+//
+// It is assumed that no Consume methods have been called on this buffer,
+// and so it returns the whole underlying slice.
+func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) {
+ b.PutLength(len(b.b) - 4 + len(payload))
+
+ return b.b, payload, nil
+}
+
+// ConsumeUint8 consumes a single byte from the buffer.
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeUint8() (uint8, error) {
+ if b.Len() < 1 {
+ return 0, ErrShortPacket
+ }
+
+ var v uint8
+ v, b.off = b.b[b.off], b.off+1
+ return v, nil
+}
+
+// AppendUint8 appends a single byte into the buffer.
+func (b *Buffer) AppendUint8(v uint8) {
+ b.b = append(b.b, v)
+}
+
+// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero.
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeBool() (bool, error) {
+ v, err := b.ConsumeUint8()
+ if err != nil {
+ return false, err
+ }
+
+ return v != 0, nil
+}
+
+// AppendBool appends a single bool into the buffer.
+// It encodes it as a single byte, with false as 0, and true as 1.
+func (b *Buffer) AppendBool(v bool) {
+ if v {
+ b.AppendUint8(1)
+ } else {
+ b.AppendUint8(0)
+ }
+}
+
+// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeUint16() (uint16, error) {
+ if b.Len() < 2 {
+ return 0, ErrShortPacket
+ }
+
+ v := binary.BigEndian.Uint16(b.b[b.off:])
+ b.off += 2
+ return v, nil
+}
+
+// AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint16(v uint16) {
+ b.b = append(b.b,
+ byte(v>>8),
+ byte(v>>0),
+ )
+}
+
+// unmarshalUint32 is used internally to read the packet length.
+// It is unsafe, and so not exported.
+// Even within this package, its use should be avoided.
+func unmarshalUint32(b []byte) uint32 {
+ return binary.BigEndian.Uint32(b[:4])
+}
+
+// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeUint32() (uint32, error) {
+ if b.Len() < 4 {
+ return 0, ErrShortPacket
+ }
+
+ v := binary.BigEndian.Uint32(b.b[b.off:])
+ b.off += 4
+ return v, nil
+}
+
+// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint32(v uint32) {
+ b.b = append(b.b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v>>0),
+ )
+}
+
+// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeUint64() (uint64, error) {
+ if b.Len() < 8 {
+ return 0, ErrShortPacket
+ }
+
+ v := binary.BigEndian.Uint64(b.b[b.off:])
+ b.off += 8
+ return v, nil
+}
+
+// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint64(v uint64) {
+ b.b = append(b.b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v>>0),
+ )
+}
+
+// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement.
+// If the buffer does not have enough data, it will return ErrShortPacket.
+func (b *Buffer) ConsumeInt64() (int64, error) {
+ u, err := b.ConsumeUint64()
+ if err != nil {
+ return 0, err
+ }
+
+ return int64(u), err
+}
+
+// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement.
+func (b *Buffer) AppendInt64(v int64) {
+ b.AppendUint64(uint64(v))
+}
+
+// ConsumeByteSlice consumes a single string of raw binary data from the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket.
+//
+// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused
+// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary).
+//
+// In no case will any Consume calls return overlapping slice aliases,
+// and Append calls are guaranteed to not disturb this slice alias.
+func (b *Buffer) ConsumeByteSlice() ([]byte, error) {
+ length, err := b.ConsumeUint32()
+ if err != nil {
+ return nil, err
+ }
+
+ if b.Len() < int(length) {
+ return nil, ErrShortPacket
+ }
+
+ v := b.b[b.off:]
+ if len(v) > int(length) {
+ v = v[:length:length]
+ }
+ b.off += int(length)
+ return v, nil
+}
+
+// AppendByteSlice appends a single string of raw binary data into the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+func (b *Buffer) AppendByteSlice(v []byte) {
+ b.AppendUint32(uint32(len(v)))
+ b.b = append(b.b, v...)
+}
+
+// ConsumeString consumes a single string of binary data from the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket.
+//
+// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data.
+// All caveats on using arbitrary binary data in Go strings applies.
+func (b *Buffer) ConsumeString() (string, error) {
+ v, err := b.ConsumeByteSlice()
+ if err != nil {
+ return "", err
+ }
+
+ return string(v), nil
+}
+
+// AppendString appends a single string of binary data into the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+func (b *Buffer) AppendString(v string) {
+ b.AppendByteSlice([]byte(v))
+}
+
+// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian).
+func (b *Buffer) PutLength(size int) {
+ if len(b.b) < 4 {
+ b.b = append(b.b, make([]byte, 4-len(b.b))...)
+ }
+
+ binary.BigEndian.PutUint32(b.b, uint32(size))
+}
+
+// MarshalBinary returns a clone of the full internal buffer.
+func (b *Buffer) MarshalBinary() ([]byte, error) {
+ clone := make([]byte, len(b.b))
+ n := copy(clone, b.b)
+ return clone[:n], nil
+}
+
+// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset.
+func (b *Buffer) UnmarshalBinary(data []byte) error {
+ if grow := len(data) - len(b.b); grow > 0 {
+ b.b = append(b.b, make([]byte, grow)...)
+ }
+
+ n := copy(b.b, data)
+ b.b = b.b[:n]
+ b.off = 0
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
new file mode 100644
index 000000000..6b7b2cef4
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
@@ -0,0 +1,142 @@
+package filexfer
+
+import (
+ "encoding"
+ "sync"
+)
+
+// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
+type ExtendedData = interface {
+ encoding.BinaryMarshaler
+ encoding.BinaryUnmarshaler
+}
+
+// ExtendedDataConstructor defines a function that returns a new(ArbitraryExtendedPacket).
+type ExtendedDataConstructor func() ExtendedData
+
+var extendedPacketTypes = struct {
+ mu sync.RWMutex
+ constructors map[string]ExtendedDataConstructor
+}{
+ constructors: make(map[string]ExtendedDataConstructor),
+}
+
+// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string.
+func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) {
+ extendedPacketTypes.mu.Lock()
+ defer extendedPacketTypes.mu.Unlock()
+
+ if _, exist := extendedPacketTypes.constructors[extension]; exist {
+ panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension)
+ }
+
+ extendedPacketTypes.constructors[extension] = constructor
+}
+
+func newExtendedPacket(extension string) ExtendedData {
+ extendedPacketTypes.mu.RLock()
+ defer extendedPacketTypes.mu.RUnlock()
+
+ if f := extendedPacketTypes.constructors[extension]; f != nil {
+ return f()
+ }
+
+ return new(Buffer)
+}
+
+// ExtendedPacket defines the SSH_FXP_EXTENDED packet.
+type ExtendedPacket struct {
+ ExtendedRequest string
+
+ Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedPacket) Type() PacketType {
+ return PacketTypeExtended
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.
+func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.ExtendedRequest) // string(extended-request)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeExtended, reqid)
+ buf.AppendString(p.ExtendedRequest)
+
+ if p.Data != nil {
+ payload, err = p.Data.MarshalBinary()
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is nil, and the extension has been registered, a new type will be made from the registration.
+// If the extension has not been registered, then a new Buffer will be allocated.
+// Then the request-specific-data will be unmarshaled from the rest of the buffer.
+func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.ExtendedRequest, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.Data == nil {
+ p.Data = newExtendedPacket(p.ExtendedRequest)
+ }
+
+ return p.Data.UnmarshalBinary(buf.Bytes())
+}
+
+// ExtendedReplyPacket defines the SSH_FXP_EXTENDED_REPLY packet.
+type ExtendedReplyPacket struct {
+ Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedReplyPacket) Type() PacketType {
+ return PacketTypeExtendedReply
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.
+func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ buf = NewMarshalBuffer(0)
+ }
+
+ buf.StartPacket(PacketTypeExtendedReply, reqid)
+
+ if p.Data != nil {
+ payload, err = p.Data.MarshalBinary()
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is nil, and there is request-specific-data,
+// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data.
+func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Data == nil {
+ p.Data = new(Buffer)
+ }
+
+ return p.Data.UnmarshalBinary(buf.Bytes())
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
new file mode 100644
index 000000000..11c0b99c2
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
@@ -0,0 +1,46 @@
+package filexfer
+
+// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13.
+// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions.
+//
+// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2
+type ExtensionPair struct {
+ Name string
+ Data string
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *ExtensionPair) Len() int {
+ return 4 + len(e.Name) + 4 + len(e.Data)
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *ExtensionPair) MarshalInto(buf *Buffer) {
+ buf.AppendString(e.Name)
+ buf.AppendString(e.Data)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
+func (e *ExtensionPair) MarshalBinary() ([]byte, error) {
+ buf := NewBuffer(make([]byte, 0, e.Len()))
+ e.MarshalInto(buf)
+ return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e.
+func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) {
+ if e.Name, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if e.Data, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// UnmarshalBinary decodes the binary encoding of ExtensionPair into e.
+func (e *ExtensionPair) UnmarshalBinary(data []byte) error {
+ return e.UnmarshalFrom(NewBuffer(data))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
new file mode 100644
index 000000000..1e5abf746
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
@@ -0,0 +1,54 @@
+// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+package filexfer
+
+// PacketMarshaller narrowly defines packets that will only be transmitted.
+//
+// ExtendedPacket types will often only implement this interface,
+// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
+type PacketMarshaller interface {
+ // MarshalPacket is the primary intended way to encode a packet.
+ // The request-id for the packet is set from reqid.
+ //
+ // An optional buffer may be given in b.
+ // If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into.
+ // The minimum capacity for the packet must be a constant expression, and should be at least 9.
+ //
+ // It shall return the main body of the encoded packet in header,
+ // and may optionally return an additional payload to be written immediately after the header.
+ //
+ // It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload.
+ MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error)
+}
+
+// Packet defines the behavior of a full generic SFTP packet.
+//
+// InitPacket, and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary.
+//
+// ExtendedPacket types should not implement this interface,
+// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
+type Packet interface {
+ PacketMarshaller
+
+ // Type returns the SSH_FXP_xy value associated with the specific packet.
+ Type() PacketType
+
+ // UnmarshalPacketBody decodes a packet body from the given Buffer.
+ // It is assumed that the common header values of the length, type and request-id have already been consumed.
+ //
+ // Implementations should not alias the given Buffer,
+ // instead they can consider prepopulating an internal buffer as a hint,
+ // and copying into that buffer if it has sufficient length.
+ UnmarshalPacketBody(buf *Buffer) error
+}
+
+// ComposePacket converts the return values of MarshalPacket into an equivalent return for MarshalBinary.
+func ComposePacket(header, payload []byte, err error) ([]byte, error) {
+ return append(header, payload...), err
+}
+
+// Default length values,
+// Defined in draft-ietf-secsh-filexfer-02 section 3.
+const (
+ DefaultMaxPacketLength = 34000
+ DefaultMaxDataLength = 32768
+)
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
new file mode 100644
index 000000000..48f869861
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
@@ -0,0 +1,147 @@
+package filexfer
+
+import (
+ "fmt"
+)
+
+// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets.
+type Status uint32
+
+// Defines the various SSH_FX_* values.
+const (
+ // see draft-ietf-secsh-filexfer-02
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7
+ StatusOK = Status(iota)
+ StatusEOF
+ StatusNoSuchFile
+ StatusPermissionDenied
+ StatusFailure
+ StatusBadMessage
+ StatusNoConnection
+ StatusConnectionLost
+ StatusOPUnsupported
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7
+ StatusV4InvalidHandle
+ StatusV4NoSuchPath
+ StatusV4FileAlreadyExists
+ StatusV4WriteProtect
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7
+ StatusV4NoMedia
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7
+ StatusV5NoSpaceOnFilesystem
+ StatusV5QuotaExceeded
+ StatusV5UnknownPrincipal
+ StatusV5LockConflict
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8
+ StatusV6DirNotEmpty
+ StatusV6NotADirectory
+ StatusV6InvalidFilename
+ StatusV6LinkLoop
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8
+ StatusV6CannotDelete
+ StatusV6InvalidParameter
+ StatusV6FileIsADirectory
+ StatusV6ByteRangeLockConflict
+ StatusV6ByteRangeLockRefused
+ StatusV6DeletePending
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1
+ StatusV6FileCorrupt
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1
+ StatusV6OwnerInvalid
+ StatusV6GroupInvalid
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1
+ StatusV6NoMatchingByteRangeLock
+)
+
+func (s Status) Error() string {
+ return s.String()
+}
+
+// Is returns true if the target is the same Status code,
+// or target is a StatusPacket with the same Status code.
+func (s Status) Is(target error) bool {
+ if target, ok := target.(*StatusPacket); ok {
+ return target.StatusCode == s
+ }
+
+ return s == target
+}
+
+func (s Status) String() string {
+ switch s {
+ case StatusOK:
+ return "SSH_FX_OK"
+ case StatusEOF:
+ return "SSH_FX_EOF"
+ case StatusNoSuchFile:
+ return "SSH_FX_NO_SUCH_FILE"
+ case StatusPermissionDenied:
+ return "SSH_FX_PERMISSION_DENIED"
+ case StatusFailure:
+ return "SSH_FX_FAILURE"
+ case StatusBadMessage:
+ return "SSH_FX_BAD_MESSAGE"
+ case StatusNoConnection:
+ return "SSH_FX_NO_CONNECTION"
+ case StatusConnectionLost:
+ return "SSH_FX_CONNECTION_LOST"
+ case StatusOPUnsupported:
+ return "SSH_FX_OP_UNSUPPORTED"
+ case StatusV4InvalidHandle:
+ return "SSH_FX_INVALID_HANDLE"
+ case StatusV4NoSuchPath:
+ return "SSH_FX_NO_SUCH_PATH"
+ case StatusV4FileAlreadyExists:
+ return "SSH_FX_FILE_ALREADY_EXISTS"
+ case StatusV4WriteProtect:
+ return "SSH_FX_WRITE_PROTECT"
+ case StatusV4NoMedia:
+ return "SSH_FX_NO_MEDIA"
+ case StatusV5NoSpaceOnFilesystem:
+ return "SSH_FX_NO_SPACE_ON_FILESYSTEM"
+ case StatusV5QuotaExceeded:
+ return "SSH_FX_QUOTA_EXCEEDED"
+ case StatusV5UnknownPrincipal:
+ return "SSH_FX_UNKNOWN_PRINCIPAL"
+ case StatusV5LockConflict:
+ return "SSH_FX_LOCK_CONFLICT"
+ case StatusV6DirNotEmpty:
+ return "SSH_FX_DIR_NOT_EMPTY"
+ case StatusV6NotADirectory:
+ return "SSH_FX_NOT_A_DIRECTORY"
+ case StatusV6InvalidFilename:
+ return "SSH_FX_INVALID_FILENAME"
+ case StatusV6LinkLoop:
+ return "SSH_FX_LINK_LOOP"
+ case StatusV6CannotDelete:
+ return "SSH_FX_CANNOT_DELETE"
+ case StatusV6InvalidParameter:
+ return "SSH_FX_INVALID_PARAMETER"
+ case StatusV6FileIsADirectory:
+ return "SSH_FX_FILE_IS_A_DIRECTORY"
+ case StatusV6ByteRangeLockConflict:
+ return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT"
+ case StatusV6ByteRangeLockRefused:
+ return "SSH_FX_BYTE_RANGE_LOCK_REFUSED"
+ case StatusV6DeletePending:
+ return "SSH_FX_DELETE_PENDING"
+ case StatusV6FileCorrupt:
+ return "SSH_FX_FILE_CORRUPT"
+ case StatusV6OwnerInvalid:
+ return "SSH_FX_OWNER_INVALID"
+ case StatusV6GroupInvalid:
+ return "SSH_FX_GROUP_INVALID"
+ case StatusV6NoMatchingByteRangeLock:
+ return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK"
+ default:
+ return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s)
+ }
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go
new file mode 100644
index 000000000..15caf6d28
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go
@@ -0,0 +1,124 @@
+package filexfer
+
+import (
+ "fmt"
+)
+
+// PacketType defines the various SFTP packet types.
+type PacketType uint8
+
+// Request packet types.
+const (
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+ PacketTypeInit = PacketType(iota + 1)
+ PacketTypeVersion
+ PacketTypeOpen
+ PacketTypeClose
+ PacketTypeRead
+ PacketTypeWrite
+ PacketTypeLStat
+ PacketTypeFStat
+ PacketTypeSetstat
+ PacketTypeFSetstat
+ PacketTypeOpenDir
+ PacketTypeReadDir
+ PacketTypeRemove
+ PacketTypeMkdir
+ PacketTypeRmdir
+ PacketTypeRealPath
+ PacketTypeStat
+ PacketTypeRename
+ PacketTypeReadLink
+ PacketTypeSymlink
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3
+ PacketTypeV6Link
+
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3
+ PacketTypeV6Block
+ PacketTypeV6Unblock
+)
+
+// Response packet types.
+const (
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+ PacketTypeStatus = PacketType(iota + 101)
+ PacketTypeHandle
+ PacketTypeData
+ PacketTypeName
+ PacketTypeAttrs
+)
+
+// Extended packet types.
+const (
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+ PacketTypeExtended = PacketType(iota + 200)
+ PacketTypeExtendedReply
+)
+
+func (f PacketType) String() string {
+ switch f {
+ case PacketTypeInit:
+ return "SSH_FXP_INIT"
+ case PacketTypeVersion:
+ return "SSH_FXP_VERSION"
+ case PacketTypeOpen:
+ return "SSH_FXP_OPEN"
+ case PacketTypeClose:
+ return "SSH_FXP_CLOSE"
+ case PacketTypeRead:
+ return "SSH_FXP_READ"
+ case PacketTypeWrite:
+ return "SSH_FXP_WRITE"
+ case PacketTypeLStat:
+ return "SSH_FXP_LSTAT"
+ case PacketTypeFStat:
+ return "SSH_FXP_FSTAT"
+ case PacketTypeSetstat:
+ return "SSH_FXP_SETSTAT"
+ case PacketTypeFSetstat:
+ return "SSH_FXP_FSETSTAT"
+ case PacketTypeOpenDir:
+ return "SSH_FXP_OPENDIR"
+ case PacketTypeReadDir:
+ return "SSH_FXP_READDIR"
+ case PacketTypeRemove:
+ return "SSH_FXP_REMOVE"
+ case PacketTypeMkdir:
+ return "SSH_FXP_MKDIR"
+ case PacketTypeRmdir:
+ return "SSH_FXP_RMDIR"
+ case PacketTypeRealPath:
+ return "SSH_FXP_REALPATH"
+ case PacketTypeStat:
+ return "SSH_FXP_STAT"
+ case PacketTypeRename:
+ return "SSH_FXP_RENAME"
+ case PacketTypeReadLink:
+ return "SSH_FXP_READLINK"
+ case PacketTypeSymlink:
+ return "SSH_FXP_SYMLINK"
+ case PacketTypeV6Link:
+ return "SSH_FXP_LINK"
+ case PacketTypeV6Block:
+ return "SSH_FXP_BLOCK"
+ case PacketTypeV6Unblock:
+ return "SSH_FXP_UNBLOCK"
+ case PacketTypeStatus:
+ return "SSH_FXP_STATUS"
+ case PacketTypeHandle:
+ return "SSH_FXP_HANDLE"
+ case PacketTypeData:
+ return "SSH_FXP_DATA"
+ case PacketTypeName:
+ return "SSH_FXP_NAME"
+ case PacketTypeAttrs:
+ return "SSH_FXP_ATTRS"
+ case PacketTypeExtended:
+ return "SSH_FXP_EXTENDED"
+ case PacketTypeExtendedReply:
+ return "SSH_FXP_EXTENDED_REPLY"
+ default:
+ return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f)
+ }
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go
new file mode 100644
index 000000000..a14277128
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go
@@ -0,0 +1,249 @@
+package filexfer
+
+// ClosePacket defines the SSH_FXP_CLOSE packet.
+type ClosePacket struct {
+ Handle string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ClosePacket) Type() PacketType {
+ return PacketTypeClose
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Handle) // string(handle)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeClose, reqid)
+ buf.AppendString(p.Handle)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ReadPacket defines the SSH_FXP_READ packet.
+type ReadPacket struct {
+ Handle string
+ Offset uint64
+ Len uint32
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ReadPacket) Type() PacketType {
+ return PacketTypeRead
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // string(handle) + uint64(offset) + uint32(len)
+ size := 4 + len(p.Handle) + 8 + 4
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeRead, reqid)
+ buf.AppendString(p.Handle)
+ buf.AppendUint64(p.Offset)
+ buf.AppendUint32(p.Len)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.Offset, err = buf.ConsumeUint64(); err != nil {
+ return err
+ }
+
+ if p.Len, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// WritePacket defines the SSH_FXP_WRITE packet.
+type WritePacket struct {
+ Handle string
+ Offset uint64
+ Data []byte
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *WritePacket) Type() PacketType {
+ return PacketTypeWrite
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // string(handle) + uint64(offset) + uint32(len(data)); data content in payload
+ size := 4 + len(p.Handle) + 8 + 4
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeWrite, reqid)
+ buf.AppendString(p.Handle)
+ buf.AppendUint64(p.Offset)
+ buf.AppendUint32(uint32(len(p.Data)))
+
+ return buf.Packet(p.Data)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is already populated, and of sufficient length to hold the data,
+// then this will copy the data into that byte slice.
+//
+// If p.Data has a length insufficient to hold the data,
+// then this will make a new slice of sufficient length, and copy the data into that.
+//
+// This means this _does not_ alias any of the data buffer that is passed in.
+func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.Offset, err = buf.ConsumeUint64(); err != nil {
+ return err
+ }
+
+ data, err := buf.ConsumeByteSlice()
+ if err != nil {
+ return err
+ }
+
+ if len(p.Data) < len(data) {
+ p.Data = make([]byte, len(data))
+ }
+
+ n := copy(p.Data, data)
+ p.Data = p.Data[:n]
+ return nil
+}
+
+// FStatPacket defines the SSH_FXP_FSTAT packet.
+type FStatPacket struct {
+ Handle string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *FStatPacket) Type() PacketType {
+ return PacketTypeFStat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Handle) // string(handle)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeFStat, reqid)
+ buf.AppendString(p.Handle)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// FSetstatPacket defines the SSH_FXP_FSETSTAT packet.
+type FSetstatPacket struct {
+ Handle string
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *FSetstatPacket) Type() PacketType {
+ return PacketTypeFSetstat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeFSetstat, reqid)
+ buf.AppendString(p.Handle)
+
+ p.Attrs.MarshalInto(buf)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return p.Attrs.UnmarshalFrom(buf)
+}
+
+// ReadDirPacket defines the SSH_FXP_READDIR packet.
+type ReadDirPacket struct {
+ Handle string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ReadDirPacket) Type() PacketType {
+ return PacketTypeReadDir
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Handle) // string(handle)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeReadDir, reqid)
+ buf.AppendString(p.Handle)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go
new file mode 100644
index 000000000..b0bc6f505
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go
@@ -0,0 +1,99 @@
+package filexfer
+
+// InitPacket defines the SSH_FXP_INIT packet.
+//
+// Note: unlike request packets, SSH_FXP_INIT carries no request-id,
+// so it marshals via MarshalBinary rather than MarshalPacket.
+type InitPacket struct {
+ Version uint32
+ Extensions []*ExtensionPair
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+func (p *InitPacket) MarshalBinary() ([]byte, error) {
+ size := 1 + 4 // byte(type) + uint32(version)
+
+ for _, ext := range p.Extensions {
+ size += ext.Len()
+ }
+
+ // Reserve 4 leading bytes for the uint32 length prefix, filled in by PutLength below.
+ b := NewBuffer(make([]byte, 4, 4+size))
+ b.AppendUint8(uint8(PacketTypeInit))
+ b.AppendUint32(p.Version)
+
+ for _, ext := range p.Extensions {
+ ext.MarshalInto(b)
+ }
+
+ b.PutLength(size)
+
+ return b.Bytes(), nil
+}
+
+// UnmarshalBinary unmarshals a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into.
+func (p *InitPacket) UnmarshalBinary(data []byte) (err error) {
+ buf := NewBuffer(data)
+
+ if p.Version, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ // Any remaining bytes are a sequence of extension name/data pairs.
+ for buf.Len() > 0 {
+ var ext ExtensionPair
+ if err := ext.UnmarshalFrom(buf); err != nil {
+ return err
+ }
+
+ p.Extensions = append(p.Extensions, &ext)
+ }
+
+ return nil
+}
+
+// VersionPacket defines the SSH_FXP_VERSION packet.
+//
+// Note: like SSH_FXP_INIT, SSH_FXP_VERSION carries no request-id,
+// so it marshals via MarshalBinary rather than MarshalPacket.
+type VersionPacket struct {
+ Version uint32
+ Extensions []*ExtensionPair
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+func (p *VersionPacket) MarshalBinary() ([]byte, error) {
+ size := 1 + 4 // byte(type) + uint32(version)
+
+ for _, ext := range p.Extensions {
+ size += ext.Len()
+ }
+
+ // Reserve 4 leading bytes for the uint32 length prefix, filled in by PutLength below.
+ b := NewBuffer(make([]byte, 4, 4+size))
+ b.AppendUint8(uint8(PacketTypeVersion))
+ b.AppendUint32(p.Version)
+
+ for _, ext := range p.Extensions {
+ ext.MarshalInto(b)
+ }
+
+ b.PutLength(size)
+
+ return b.Bytes(), nil
+}
+
+// UnmarshalBinary unmarshals a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into.
+func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) {
+ buf := NewBuffer(data)
+
+ if p.Version, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ // Any remaining bytes are a sequence of extension name/data pairs.
+ for buf.Len() > 0 {
+ var ext ExtensionPair
+ if err := ext.UnmarshalFrom(buf); err != nil {
+ return err
+ }
+
+ p.Extensions = append(p.Extensions, &ext)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
new file mode 100644
index 000000000..135871142
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
@@ -0,0 +1,89 @@
+package filexfer
+
+// SSH_FXF_* flags.
+// These are the pflags bit values carried in an SSH_FXP_OPEN request.
+const (
+ FlagRead = 1 << iota // SSH_FXF_READ
+ FlagWrite // SSH_FXF_WRITE
+ FlagAppend // SSH_FXF_APPEND
+ FlagCreate // SSH_FXF_CREAT
+ FlagTruncate // SSH_FXF_TRUNC
+ FlagExclusive // SSH_FXF_EXCL
+)
+
+// OpenPacket defines the SSH_FXP_OPEN packet.
+type OpenPacket struct {
+ Filename string
+ PFlags uint32
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *OpenPacket) Type() PacketType {
+ return PacketTypeOpen
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ // string(filename) + uint32(pflags) + ATTRS(attrs)
+ size := 4 + len(p.Filename) + 4 + p.Attrs.Len()
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeOpen, reqid)
+ buf.AppendString(p.Filename)
+ buf.AppendUint32(p.PFlags)
+
+ p.Attrs.MarshalInto(buf)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Filename, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.PFlags, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ return p.Attrs.UnmarshalFrom(buf)
+}
+
+// OpenDirPacket defines the SSH_FXP_OPENDIR packet.
+type OpenDirPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *OpenDirPacket) Type() PacketType {
+ return PacketTypeOpenDir
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeOpenDir, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go
new file mode 100644
index 000000000..3f24e9c22
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go
@@ -0,0 +1,323 @@
+package filexfer
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// smallBufferSize is an initial allocation minimal capacity.
+const smallBufferSize = 64
+
+// newPacketFromType returns a freshly allocated zero-value packet
+// corresponding to the given request packet type,
+// or an error if the type is not a known request type.
+func newPacketFromType(typ PacketType) (Packet, error) {
+ switch typ {
+ case PacketTypeOpen:
+ return new(OpenPacket), nil
+ case PacketTypeClose:
+ return new(ClosePacket), nil
+ case PacketTypeRead:
+ return new(ReadPacket), nil
+ case PacketTypeWrite:
+ return new(WritePacket), nil
+ case PacketTypeLStat:
+ return new(LStatPacket), nil
+ case PacketTypeFStat:
+ return new(FStatPacket), nil
+ case PacketTypeSetstat:
+ return new(SetstatPacket), nil
+ case PacketTypeFSetstat:
+ return new(FSetstatPacket), nil
+ case PacketTypeOpenDir:
+ return new(OpenDirPacket), nil
+ case PacketTypeReadDir:
+ return new(ReadDirPacket), nil
+ case PacketTypeRemove:
+ return new(RemovePacket), nil
+ case PacketTypeMkdir:
+ return new(MkdirPacket), nil
+ case PacketTypeRmdir:
+ return new(RmdirPacket), nil
+ case PacketTypeRealPath:
+ return new(RealPathPacket), nil
+ case PacketTypeStat:
+ return new(StatPacket), nil
+ case PacketTypeRename:
+ return new(RenamePacket), nil
+ case PacketTypeReadLink:
+ return new(ReadLinkPacket), nil
+ case PacketTypeSymlink:
+ return new(SymlinkPacket), nil
+ case PacketTypeExtended:
+ return new(ExtendedPacket), nil
+ default:
+ return nil, fmt.Errorf("unexpected request packet type: %v", typ)
+ }
+}
+
+// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02
+//
+// RawPacket is intended for use in clients receiving responses,
+// where a response will be expected to be of a limited number of types,
+// and unmarshaling unknown/unexpected response packets is unnecessary.
+//
+// For servers expecting to receive arbitrary request packet types,
+// use RequestPacket.
+//
+// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+type RawPacket struct {
+ PacketType PacketType
+ RequestID uint32
+
+ Data Buffer
+}
+
+// Type returns the Type field defining the SSH_FXP_xy type for this packet.
+func (p *RawPacket) Type() PacketType {
+ return p.PacketType
+}
+
+// Reset clears the pointers and reference-semantic variables of RawPacket,
+// releasing underlying resources, and making them and the RawPacket suitable to be reused,
+// so long as no other references have been kept.
+func (p *RawPacket) Reset() {
+ p.Data = Buffer{}
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The internal p.RequestID is overridden by the reqid argument.
+func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; the header carries no body here,
+ // so a zero-size marshal buffer suffices.
+ buf = NewMarshalBuffer(0)
+ }
+
+ buf.StartPacket(p.PacketType, reqid)
+
+ // The raw Data bytes are returned as the payload part, unmodified.
+ return buf.Packet(p.Data.Bytes())
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RawPacket) MarshalBinary() ([]byte, error) {
+ return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RawPacket from the given Buffer into p.
+//
+// The Data field will alias the passed in Buffer,
+// so the buffer passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) UnmarshalFrom(buf *Buffer) error {
+ typ, err := buf.ConsumeUint8()
+ if err != nil {
+ return err
+ }
+
+ p.PacketType = PacketType(typ)
+
+ if p.RequestID, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ // The remainder of the buffer becomes Data without copying (aliases buf).
+ p.Data = *buf
+ return nil
+}
+
+// UnmarshalBinary decodes a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Data is not allowed to alias any part of the data byte slice.
+func (p *RawPacket) UnmarshalBinary(data []byte) error {
+ clone := make([]byte, len(data))
+ n := copy(clone, data)
+ return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// readPacket reads a uint32 length-prefixed binary data packet from r.
+// using the given byte slice as a backing array.
+//
+// If the packet length read from r is bigger than maxPacketLength,
+// or greater than math.MaxInt32 on a 32-bit implementation,
+// then a `ErrLongPacket` error will be returned.
+//
+// If the given byte slice is insufficient to hold the packet,
+// then it will be extended to fill the packet size.
+func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) {
+ if cap(b) < 4 {
+ // We will need allocate our own buffer just for reading the packet length.
+
+ // However, we don’t really want to allocate an extremely narrow buffer (4-bytes),
+ // and cause unnecessary allocation churn from both length reads and small packet reads,
+ // so we use smallBufferSize from the bytes package as a reasonable guess.
+
+ // But if callers really do want to force narrow throw-away allocation of every packet body,
+ // they can do so with a buffer of capacity 4.
+ b = make([]byte, smallBufferSize)
+ }
+
+ if _, err := io.ReadFull(r, b[:4]); err != nil {
+ return nil, err
+ }
+
+ length := unmarshalUint32(b)
+ if int(length) < 5 {
+ // Must have at least uint8(type) and uint32(request-id)
+
+ if int(length) < 0 {
+ // Only possible when strconv.IntSize == 32,
+ // the packet length is longer than math.MaxInt32,
+ // and thus longer than any possible slice.
+ return nil, ErrLongPacket
+ }
+
+ return nil, ErrShortPacket
+ }
+ if length > maxPacketLength {
+ return nil, ErrLongPacket
+ }
+
+ if int(length) > cap(b) {
+ // We know int(length) must be positive, because of tests above.
+ b = make([]byte, length)
+ }
+
+ // Return however many bytes were actually read, alongside any ReadFull error
+ // (io.ErrUnexpectedEOF on a short read).
+ n, err := io.ReadFull(r, b[:length])
+ return b[:n], err
+}
+
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Data field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+ b, err := readPacket(r, b, maxPacketLength)
+ if err != nil {
+ return err
+ }
+
+ return p.UnmarshalFrom(NewBuffer(b))
+}
+
+// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02
+// but also automatically decode/encodes valid request packets (2 < type < 100 || type == 200).
+//
+// RequestPacket is intended for use in servers receiving requests,
+// where any arbitrary request may be received, and so decoding them automatically
+// is useful.
+//
+// For clients expecting to receive specific response packet types,
+// where automatic unmarshaling of the packet body does not make sense,
+// use RawPacket.
+//
+// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+type RequestPacket struct {
+ RequestID uint32
+
+ Request Packet
+}
+
+// Type returns the SSH_FXP_xy value associated with the underlying packet.
+func (p *RequestPacket) Type() PacketType {
+ return p.Request.Type()
+}
+
+// Reset clears the pointers and reference-semantic variables in RequestPacket,
+// releasing underlying resources, and making them and the RequestPacket suitable to be reused,
+// so long as no other references have been kept.
+func (p *RequestPacket) Reset() {
+ p.Request = nil
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The internal p.RequestID is overridden by the reqid argument.
+func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ if p.Request == nil {
+ return nil, nil, errors.New("empty request packet")
+ }
+
+ // Marshaling is delegated entirely to the concrete underlying request packet.
+ return p.Request.MarshalPacket(reqid, b)
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RequestPacket) MarshalBinary() ([]byte, error) {
+ return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RequestPacket from the given Buffer into p.
+//
+// The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE),
+// so the buffer passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error {
+ typ, err := buf.ConsumeUint8()
+ if err != nil {
+ return err
+ }
+
+ // Allocate the concrete packet for this type before consuming the request-id,
+ // so an unknown type fails fast.
+ p.Request, err = newPacketFromType(PacketType(typ))
+ if err != nil {
+ return err
+ }
+
+ if p.RequestID, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ return p.Request.UnmarshalPacketBody(buf)
+}
+
+// UnmarshalBinary decodes a full request packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Request is not allowed to alias any part of the data byte slice.
+func (p *RequestPacket) UnmarshalBinary(data []byte) error {
+ clone := make([]byte, len(data))
+ n := copy(clone, data)
+ return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Request field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+ b, err := readPacket(r, b, maxPacketLength)
+ if err != nil {
+ return err
+ }
+
+ return p.UnmarshalFrom(NewBuffer(b))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
new file mode 100644
index 000000000..e6f692d9f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
@@ -0,0 +1,368 @@
+package filexfer
+
+// LStatPacket defines the SSH_FXP_LSTAT packet.
+type LStatPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *LStatPacket) Type() PacketType {
+ return PacketTypeLStat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeLStat, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SetstatPacket defines the SSH_FXP_SETSTAT packet.
+type SetstatPacket struct {
+ Path string
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *SetstatPacket) Type() PacketType {
+ return PacketTypeSetstat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeSetstat, reqid)
+ buf.AppendString(p.Path)
+
+ p.Attrs.MarshalInto(buf)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return p.Attrs.UnmarshalFrom(buf)
+}
+
+// RemovePacket defines the SSH_FXP_REMOVE packet.
+type RemovePacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *RemovePacket) Type() PacketType {
+ return PacketTypeRemove
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeRemove, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// MkdirPacket defines the SSH_FXP_MKDIR packet.
+type MkdirPacket struct {
+ Path string
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *MkdirPacket) Type() PacketType {
+ return PacketTypeMkdir
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeMkdir, reqid)
+ buf.AppendString(p.Path)
+
+ p.Attrs.MarshalInto(buf)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return p.Attrs.UnmarshalFrom(buf)
+}
+
+// RmdirPacket defines the SSH_FXP_RMDIR packet.
+type RmdirPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *RmdirPacket) Type() PacketType {
+ return PacketTypeRmdir
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeRmdir, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RealPathPacket defines the SSH_FXP_REALPATH packet.
+type RealPathPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *RealPathPacket) Type() PacketType {
+ return PacketTypeRealPath
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeRealPath, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// StatPacket defines the SSH_FXP_STAT packet.
+type StatPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *StatPacket) Type() PacketType {
+ return PacketTypeStat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeStat, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RenamePacket defines the SSH_FXP_RENAME packet.
+type RenamePacket struct {
+ OldPath string
+ NewPath string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *RenamePacket) Type() PacketType {
+ return PacketTypeRename
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ // string(oldpath) + string(newpath)
+ size := 4 + len(p.OldPath) + 4 + len(p.NewPath)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeRename, reqid)
+ buf.AppendString(p.OldPath)
+ buf.AppendString(p.NewPath)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.OldPath, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.NewPath, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ReadLinkPacket defines the SSH_FXP_READLINK packet.
+type ReadLinkPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ReadLinkPacket) Type() PacketType {
+ return PacketTypeReadLink
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeReadLink, reqid)
+ buf.AppendString(p.Path)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SymlinkPacket defines the SSH_FXP_SYMLINK packet.
+//
+// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed.
+// Unfortunately, the reversal was not noticed until the server was widely deployed.
+// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL
+type SymlinkPacket struct {
+ LinkPath string
+ TargetPath string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *SymlinkPacket) Type() PacketType {
+ return PacketTypeSymlink
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // b cannot hold even the 9-byte packet header; allocate a buffer sized for the whole packet.
+ // string(targetpath) + string(linkpath)
+ size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeSymlink, reqid)
+
+ // Arguments were inadvertently reversed: target is written on the wire first,
+ // matching the widely-deployed OpenSSH behavior (see the type comment above).
+ buf.AppendString(p.TargetPath)
+ buf.AppendString(p.LinkPath)
+
+ // payload is the nil named return: this packet has no separate payload section.
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ // Arguments were inadvertently reversed: target is read from the wire first.
+ if p.TargetPath, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.LinkPath, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go
new file mode 100644
index 000000000..2fe63d591
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go
@@ -0,0 +1,114 @@
+package filexfer
+
+// FileMode represents a file’s mode and permission bits.
+// The bits are defined according to POSIX standards,
+// and may not apply to the OS being built for.
+type FileMode uint32
+
+// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations.
+const (
+ ModePerm FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO
+ ModeUserRead FileMode = 0o0400 // S_IRUSR
+ ModeUserWrite FileMode = 0o0200 // S_IWUSR
+ ModeUserExec FileMode = 0o0100 // S_IXUSR
+ ModeGroupRead FileMode = 0o0040 // S_IRGRP
+ ModeGroupWrite FileMode = 0o0020 // S_IWGRP
+ ModeGroupExec FileMode = 0o0010 // S_IXGRP
+ ModeOtherRead FileMode = 0o0004 // S_IROTH
+ ModeOtherWrite FileMode = 0o0002 // S_IWOTH
+ ModeOtherExec FileMode = 0o0001 // S_IXOTH
+
+ ModeSetUID FileMode = 0o4000 // S_ISUID
+ ModeSetGID FileMode = 0o2000 // S_ISGID
+ ModeSticky FileMode = 0o1000 // S_ISVTX
+
+ ModeType FileMode = 0xF000 // S_IFMT
+ ModeNamedPipe FileMode = 0x1000 // S_IFIFO
+ ModeCharDevice FileMode = 0x2000 // S_IFCHR
+ ModeDir FileMode = 0x4000 // S_IFDIR
+ ModeDevice FileMode = 0x6000 // S_IFBLK
+ ModeRegular FileMode = 0x8000 // S_IFREG
+ ModeSymlink FileMode = 0xA000 // S_IFLNK
+ ModeSocket FileMode = 0xC000 // S_IFSOCK
+)
+
+// IsDir reports whether m describes a directory.
+// That is, it tests for m.Type() == ModeDir.
+func (m FileMode) IsDir() bool {
+ return (m & ModeType) == ModeDir
+}
+
+// IsRegular reports whether m describes a regular file.
+// That is, it tests for m.Type() == ModeRegular
+func (m FileMode) IsRegular() bool {
+ return (m & ModeType) == ModeRegular
+}
+
+// Perm returns the POSIX permission bits in m (m & ModePerm).
+func (m FileMode) Perm() FileMode {
+ return (m & ModePerm)
+}
+
+// Type returns the type bits in m (m & ModeType).
+func (m FileMode) Type() FileMode {
+ return (m & ModeType)
+}
+
+// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string.
+func (m FileMode) String() string {
+ var buf [10]byte
+
+ // buf[0] is the file-type character; '?' marks an unknown type.
+ switch m.Type() {
+ case ModeRegular:
+ buf[0] = '-'
+ case ModeDir:
+ buf[0] = 'd'
+ case ModeSymlink:
+ buf[0] = 'l'
+ case ModeDevice:
+ buf[0] = 'b'
+ case ModeCharDevice:
+ buf[0] = 'c'
+ case ModeNamedPipe:
+ buf[0] = 'p'
+ case ModeSocket:
+ buf[0] = 's'
+ default:
+ buf[0] = '?'
+ }
+
+ // Permission bits run from bit 8 (user read) down to bit 0 (other exec),
+ // so position i in "rwxrwxrwx" corresponds to bit 9-1-i.
+ const rwx = "rwxrwxrwx"
+ for i, c := range rwx {
+ if m&(1<<uint(9-1-i)) != 0 {
+ buf[i+1] = byte(c)
+ } else {
+ buf[i+1] = '-'
+ }
+ }
+
+ // setuid/setgid/sticky overlay the user/group/other exec slots:
+ // lowercase when the exec bit is also set, uppercase when it is not.
+ if m&ModeSetUID != 0 {
+ if buf[3] == 'x' {
+ buf[3] = 's'
+ } else {
+ buf[3] = 'S'
+ }
+ }
+
+ if m&ModeSetGID != 0 {
+ if buf[6] == 'x' {
+ buf[6] = 's'
+ } else {
+ buf[6] = 'S'
+ }
+ }
+
+ if m&ModeSticky != 0 {
+ if buf[9] == 'x' {
+ buf[9] = 't'
+ } else {
+ buf[9] = 'T'
+ }
+ }
+
+ return string(buf[:])
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
new file mode 100644
index 000000000..7a9b3eae8
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
@@ -0,0 +1,243 @@
+package filexfer
+
+import (
+ "fmt"
+)
+
+// StatusPacket defines the SSH_FXP_STATUS packet.
+//
+// Specified in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7
+type StatusPacket struct {
+ StatusCode Status
+ ErrorMessage string
+ LanguageTag string
+}
+
+// Error makes StatusPacket an error type.
+func (p *StatusPacket) Error() string {
+ if p.ErrorMessage == "" {
+ return "sftp: " + p.StatusCode.String()
+ }
+
+ return fmt.Sprintf("sftp: %q (%s)", p.ErrorMessage, p.StatusCode)
+}
+
+// Is returns true if target is a StatusPacket with the same StatusCode,
+// or target is a Status code which is the same as StatusCode.
+func (p *StatusPacket) Is(target error) bool {
+ if target, ok := target.(*StatusPacket); ok {
+ return p.StatusCode == target.StatusCode
+ }
+
+ return p.StatusCode == target
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *StatusPacket) Type() PacketType {
+ return PacketTypeStatus
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *StatusPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // uint32(error/status code) + string(error message) + string(language tag)
+ size := 4 + 4 + len(p.ErrorMessage) + 4 + len(p.LanguageTag)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeStatus, reqid)
+ buf.AppendUint32(uint32(p.StatusCode))
+ buf.AppendString(p.ErrorMessage)
+ buf.AppendString(p.LanguageTag)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *StatusPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ statusCode, err := buf.ConsumeUint32()
+ if err != nil {
+ return err
+ }
+ p.StatusCode = Status(statusCode)
+
+ if p.ErrorMessage, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.LanguageTag, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// HandlePacket defines the SSH_FXP_HANDLE packet.
+type HandlePacket struct {
+ Handle string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *HandlePacket) Type() PacketType {
+ return PacketTypeHandle
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *HandlePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Handle) // string(handle)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeHandle, reqid)
+ buf.AppendString(p.Handle)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *HandlePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Handle, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DataPacket defines the SSH_FXP_DATA packet.
+type DataPacket struct {
+ Data []byte
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *DataPacket) Type() PacketType {
+ return PacketTypeData
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *DataPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 // uint32(len(data)); data content in payload
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeData, reqid)
+ buf.AppendUint32(uint32(len(p.Data)))
+
+ return buf.Packet(p.Data)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is already populated, and of sufficient length to hold the data,
+// then this will copy the data into that byte slice.
+//
+// If p.Data has a length insufficient to hold the data,
+// then this will make a new slice of sufficient length, and copy the data into that.
+//
+// This means this _does not_ alias any of the data buffer that is passed in.
+func (p *DataPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ data, err := buf.ConsumeByteSlice()
+ if err != nil {
+ return err
+ }
+
+ if len(p.Data) < len(data) {
+ p.Data = make([]byte, len(data))
+ }
+
+ n := copy(p.Data, data)
+ p.Data = p.Data[:n]
+ return nil
+}
+
+// NamePacket defines the SSH_FXP_NAME packet.
+type NamePacket struct {
+ Entries []*NameEntry
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *NamePacket) Type() PacketType {
+ return PacketTypeName
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *NamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 // uint32(len(entries))
+
+ for _, e := range p.Entries {
+ size += e.Len()
+ }
+
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeName, reqid)
+ buf.AppendUint32(uint32(len(p.Entries)))
+
+ for _, e := range p.Entries {
+ e.MarshalInto(buf)
+ }
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ count, err := buf.ConsumeUint32()
+ if err != nil {
+ return err
+ }
+
+ p.Entries = make([]*NameEntry, 0, count)
+
+ for i := uint32(0); i < count; i++ {
+ var e NameEntry
+ if err := e.UnmarshalFrom(buf); err != nil {
+ return err
+ }
+
+ p.Entries = append(p.Entries, &e)
+ }
+
+ return nil
+}
+
+// AttrsPacket defines the SSH_FXP_ATTRS packet.
+type AttrsPacket struct {
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *AttrsPacket) Type() PacketType {
+ return PacketTypeAttrs
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *AttrsPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := p.Attrs.Len() // ATTRS(attrs)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeAttrs, reqid)
+ p.Attrs.MarshalInto(buf)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *AttrsPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ return p.Attrs.UnmarshalFrom(buf)
+}
diff --git a/vendor/github.com/pkg/sftp/ls_formatting.go b/vendor/github.com/pkg/sftp/ls_formatting.go
new file mode 100644
index 000000000..e083e22a4
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/ls_formatting.go
@@ -0,0 +1,81 @@
+package sftp
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/user"
+ "strconv"
+ "time"
+
+ sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
+)
+
+func lsFormatID(id uint32) string {
+ return strconv.FormatUint(uint64(id), 10)
+}
+
+type osIDLookup struct{}
+
+func (osIDLookup) Filelist(*Request) (ListerAt, error) {
+ return nil, errors.New("unimplemented stub")
+}
+
+func (osIDLookup) LookupUserName(uid string) string {
+ u, err := user.LookupId(uid)
+ if err != nil {
+ return uid
+ }
+
+ return u.Username
+}
+
+func (osIDLookup) LookupGroupName(gid string) string {
+ g, err := user.LookupGroupId(gid)
+ if err != nil {
+ return gid
+ }
+
+ return g.Name
+}
+
+// runLs formats the FileInfo as per `ls -l` style, which is in the 'longname' field of a SSH_FXP_NAME entry.
+// This is a fairly simple implementation, just enough to look close to openssh in simple cases.
+func runLs(idLookup NameLookupFileLister, dirent os.FileInfo) string {
+ // example from openssh sftp server:
+ // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd
+ // format:
+ // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name
+
+ symPerms := sshfx.FileMode(fromFileMode(dirent.Mode())).String()
+
+ var numLinks uint64 = 1
+ uid, gid := "0", "0"
+
+ switch sys := dirent.Sys().(type) {
+ case *sshfx.Attributes:
+ uid = lsFormatID(sys.UID)
+ gid = lsFormatID(sys.GID)
+ case *FileStat:
+ uid = lsFormatID(sys.UID)
+ gid = lsFormatID(sys.GID)
+ default:
+ numLinks, uid, gid = lsLinksUIDGID(dirent)
+ }
+
+ if idLookup != nil {
+ uid, gid = idLookup.LookupUserName(uid), idLookup.LookupGroupName(gid)
+ }
+
+ mtime := dirent.ModTime()
+ date := mtime.Format("Jan 2")
+
+ var yearOrTime string
+ if mtime.Before(time.Now().AddDate(0, -6, 0)) {
+ yearOrTime = mtime.Format("2006")
+ } else {
+ yearOrTime = mtime.Format("15:04")
+ }
+
+ return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %5s %s", symPerms, numLinks, uid, gid, dirent.Size(), date, yearOrTime, dirent.Name())
+}
diff --git a/vendor/github.com/pkg/sftp/ls_plan9.go b/vendor/github.com/pkg/sftp/ls_plan9.go
new file mode 100644
index 000000000..a16a3ea06
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/ls_plan9.go
@@ -0,0 +1,21 @@
+// +build plan9
+
+package sftp
+
+import (
+ "os"
+ "syscall"
+)
+
+func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
+ numLinks = 1
+ uid, gid = "0", "0"
+
+ switch sys := fi.Sys().(type) {
+ case *syscall.Dir:
+ uid = sys.Uid
+ gid = sys.Gid
+ }
+
+ return numLinks, uid, gid
+}
diff --git a/vendor/github.com/pkg/sftp/ls_stub.go b/vendor/github.com/pkg/sftp/ls_stub.go
new file mode 100644
index 000000000..6dec39378
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/ls_stub.go
@@ -0,0 +1,11 @@
+// +build windows android
+
+package sftp
+
+import (
+ "os"
+)
+
+func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
+ return 1, "0", "0"
+}
diff --git a/vendor/github.com/pkg/sftp/ls_unix.go b/vendor/github.com/pkg/sftp/ls_unix.go
new file mode 100644
index 000000000..59ccffde5
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/ls_unix.go
@@ -0,0 +1,23 @@
+// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js
+
+package sftp
+
+import (
+ "os"
+ "syscall"
+)
+
+func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
+ numLinks = 1
+ uid, gid = "0", "0"
+
+ switch sys := fi.Sys().(type) {
+ case *syscall.Stat_t:
+ numLinks = uint64(sys.Nlink)
+ uid = lsFormatID(sys.Uid)
+ gid = lsFormatID(sys.Gid)
+ default:
+ }
+
+ return numLinks, uid, gid
+}
diff --git a/vendor/github.com/pkg/sftp/match.go b/vendor/github.com/pkg/sftp/match.go
new file mode 100644
index 000000000..875006afd
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/match.go
@@ -0,0 +1,137 @@
+package sftp
+
+import (
+ "path"
+ "strings"
+)
+
+// ErrBadPattern indicates a globbing pattern was malformed.
+var ErrBadPattern = path.ErrBadPattern
+
+// Match reports whether name matches the shell pattern.
+//
+// This is an alias for path.Match from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Match.
+func Match(pattern, name string) (matched bool, err error) {
+ return path.Match(pattern, name)
+}
+
+// detect if byte(char) is path separator
+func isPathSeparator(c byte) bool {
+ return c == '/'
+}
+
+// Split splits the path p immediately following the final slash,
+// separating it into a directory and file name component.
+//
+// This is an alias for path.Split from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Split.
+func Split(p string) (dir, file string) {
+ return path.Split(p)
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed.
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+func (c *Client) Glob(pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ file, err := c.Lstat(pattern)
+ if err != nil {
+ return nil, nil
+ }
+ dir, _ := Split(pattern)
+ dir = cleanGlobPath(dir)
+ return []string{Join(dir, file.Name())}, nil
+ }
+
+ dir, file := Split(pattern)
+ dir = cleanGlobPath(dir)
+
+ if !hasMeta(dir) {
+ return c.glob(dir, file, nil)
+ }
+
+ // Prevent infinite recursion. See issue 15879.
+ if dir == pattern {
+ return nil, ErrBadPattern
+ }
+
+ var m []string
+ m, err = c.Glob(dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = c.glob(d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// cleanGlobPath prepares path for glob matching.
+func cleanGlobPath(path string) string {
+ switch path {
+ case "":
+ return "."
+ case "/":
+ return path
+ default:
+ return path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := c.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ names, err := c.ReadDir(dir)
+ if err != nil {
+ return
+ }
+ //sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := Match(pattern, n.Name())
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, Join(dir, n.Name()))
+ }
+ }
+ return
+}
+
+// Join joins any number of path elements into a single path, separating
+// them with slashes.
+//
+// This is an alias for path.Join from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Join.
+func Join(elem ...string) string {
+ return path.Join(elem...)
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ return strings.ContainsAny(path, "\\*?[")
+}
diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go
new file mode 100644
index 000000000..c740c4c8c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/packet-manager.go
@@ -0,0 +1,216 @@
+package sftp
+
+import (
+ "encoding"
+ "sort"
+ "sync"
+)
+
+// The goal of the packetManager is to keep the outgoing packets in the same
+// order as the incoming as is required by section 7 of the RFC.
+
+type packetManager struct {
+ requests chan orderedPacket
+ responses chan orderedPacket
+ fini chan struct{}
+ incoming orderedPackets
+ outgoing orderedPackets
+ sender packetSender // connection object
+ working *sync.WaitGroup
+ packetCount uint32
+ // it is not nil if the allocator is enabled
+ alloc *allocator
+}
+
+type packetSender interface {
+ sendPacket(encoding.BinaryMarshaler) error
+}
+
+func newPktMgr(sender packetSender) *packetManager {
+ s := &packetManager{
+ requests: make(chan orderedPacket, SftpServerWorkerCount),
+ responses: make(chan orderedPacket, SftpServerWorkerCount),
+ fini: make(chan struct{}),
+ incoming: make([]orderedPacket, 0, SftpServerWorkerCount),
+ outgoing: make([]orderedPacket, 0, SftpServerWorkerCount),
+ sender: sender,
+ working: &sync.WaitGroup{},
+ }
+ go s.controller()
+ return s
+}
+
+//// packet ordering
+func (s *packetManager) newOrderID() uint32 {
+ s.packetCount++
+ return s.packetCount
+}
+
+// returns the next orderID without incrementing it.
+// This is used before receiving a new packet, with the allocator enabled, to associate
+// the slice allocated for the received packet with the orderID that will be used to mark
+// the allocated slices for reuse once the request is served
+func (s *packetManager) getNextOrderID() uint32 {
+ return s.packetCount + 1
+}
+
+type orderedRequest struct {
+ requestPacket
+ orderid uint32
+}
+
+func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest {
+ return orderedRequest{requestPacket: p, orderid: s.newOrderID()}
+}
+func (p orderedRequest) orderID() uint32 { return p.orderid }
+func (p orderedRequest) setOrderID(oid uint32) { p.orderid = oid }
+
+type orderedResponse struct {
+ responsePacket
+ orderid uint32
+}
+
+func (s *packetManager) newOrderedResponse(p responsePacket, id uint32,
+) orderedResponse {
+ return orderedResponse{responsePacket: p, orderid: id}
+}
+func (p orderedResponse) orderID() uint32 { return p.orderid }
+func (p orderedResponse) setOrderID(oid uint32) { p.orderid = oid }
+
+type orderedPacket interface {
+ id() uint32
+ orderID() uint32
+}
+type orderedPackets []orderedPacket
+
+func (o orderedPackets) Sort() {
+ sort.Slice(o, func(i, j int) bool {
+ return o[i].orderID() < o[j].orderID()
+ })
+}
+
+//// packet registry
+// register incoming packets to be handled
+func (s *packetManager) incomingPacket(pkt orderedRequest) {
+ s.working.Add(1)
+ s.requests <- pkt
+}
+
+// register outgoing packets as being ready
+func (s *packetManager) readyPacket(pkt orderedResponse) {
+ s.responses <- pkt
+ s.working.Done()
+}
+
+// shut down packetManager controller
+func (s *packetManager) close() {
+ // pause until current packets are processed
+ s.working.Wait()
+ close(s.fini)
+}
+
+// Passed a worker function, returns a channel for incoming packets.
+// Packet responses are kept in the order they are received while
+// maximizing the throughput of file transfers.
+func (s *packetManager) workerChan(runWorker func(chan orderedRequest),
+) chan orderedRequest {
+ // multiple workers for faster read/writes
+ rwChan := make(chan orderedRequest, SftpServerWorkerCount)
+ for i := 0; i < SftpServerWorkerCount; i++ {
+ runWorker(rwChan)
+ }
+
+ // single worker to enforce sequential processing of everything else
+ cmdChan := make(chan orderedRequest)
+ runWorker(cmdChan)
+
+ pktChan := make(chan orderedRequest, SftpServerWorkerCount)
+ go func() {
+ for pkt := range pktChan {
+ switch pkt.requestPacket.(type) {
+ case *sshFxpReadPacket, *sshFxpWritePacket:
+ s.incomingPacket(pkt)
+ rwChan <- pkt
+ continue
+ case *sshFxpClosePacket:
+ // wait for reads/writes to finish when file is closed
+ // incomingPacket() call must occur after this
+ s.working.Wait()
+ }
+ s.incomingPacket(pkt)
+ // all non-RW use sequential cmdChan
+ cmdChan <- pkt
+ }
+ close(rwChan)
+ close(cmdChan)
+ s.close()
+ }()
+
+ return pktChan
+}
+
+// process packets
+func (s *packetManager) controller() {
+ for {
+ select {
+ case pkt := <-s.requests:
+ debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderID())
+ s.incoming = append(s.incoming, pkt)
+ s.incoming.Sort()
+ case pkt := <-s.responses:
+ debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderID())
+ s.outgoing = append(s.outgoing, pkt)
+ s.outgoing.Sort()
+ case <-s.fini:
+ return
+ }
+ s.maybeSendPackets()
+ }
+}
+
+// send as many packets as are ready
+func (s *packetManager) maybeSendPackets() {
+ for {
+ if len(s.outgoing) == 0 || len(s.incoming) == 0 {
+ debug("break! -- outgoing: %v; incoming: %v",
+ len(s.outgoing), len(s.incoming))
+ break
+ }
+ out := s.outgoing[0]
+ in := s.incoming[0]
+ // debug("incoming: %v", ids(s.incoming))
+ // debug("outgoing: %v", ids(s.outgoing))
+ if in.orderID() == out.orderID() {
+ debug("Sending packet: %v", out.id())
+ s.sender.sendPacket(out.(encoding.BinaryMarshaler))
+ if s.alloc != nil {
+ // mark for reuse the slices allocated for this request
+ s.alloc.ReleasePages(in.orderID())
+ }
+ // pop off heads
+ copy(s.incoming, s.incoming[1:]) // shift left
+ s.incoming[len(s.incoming)-1] = nil // clear last
+ s.incoming = s.incoming[:len(s.incoming)-1] // remove last
+ copy(s.outgoing, s.outgoing[1:]) // shift left
+ s.outgoing[len(s.outgoing)-1] = nil // clear last
+ s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last
+ } else {
+ break
+ }
+ }
+}
+
+// func oids(o []orderedPacket) []uint32 {
+// res := make([]uint32, 0, len(o))
+// for _, v := range o {
+// res = append(res, v.orderId())
+// }
+// return res
+// }
+// func ids(o []orderedPacket) []uint32 {
+// res := make([]uint32, 0, len(o))
+// for _, v := range o {
+// res = append(res, v.id())
+// }
+// return res
+// }
diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go
new file mode 100644
index 000000000..f4f905295
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/packet-typing.go
@@ -0,0 +1,135 @@
+package sftp
+
+import (
+ "encoding"
+ "fmt"
+)
+
+// all incoming packets
+type requestPacket interface {
+ encoding.BinaryUnmarshaler
+ id() uint32
+}
+
+type responsePacket interface {
+ encoding.BinaryMarshaler
+ id() uint32
+}
+
+// interfaces to group types
+type hasPath interface {
+ requestPacket
+ getPath() string
+}
+
+type hasHandle interface {
+ requestPacket
+ getHandle() string
+}
+
+type notReadOnly interface {
+ notReadOnly()
+}
+
+//// define types by adding methods
+// hasPath
+func (p *sshFxpLstatPacket) getPath() string { return p.Path }
+func (p *sshFxpStatPacket) getPath() string { return p.Path }
+func (p *sshFxpRmdirPacket) getPath() string { return p.Path }
+func (p *sshFxpReadlinkPacket) getPath() string { return p.Path }
+func (p *sshFxpRealpathPacket) getPath() string { return p.Path }
+func (p *sshFxpMkdirPacket) getPath() string { return p.Path }
+func (p *sshFxpSetstatPacket) getPath() string { return p.Path }
+func (p *sshFxpStatvfsPacket) getPath() string { return p.Path }
+func (p *sshFxpRemovePacket) getPath() string { return p.Filename }
+func (p *sshFxpRenamePacket) getPath() string { return p.Oldpath }
+func (p *sshFxpSymlinkPacket) getPath() string { return p.Targetpath }
+func (p *sshFxpOpendirPacket) getPath() string { return p.Path }
+func (p *sshFxpOpenPacket) getPath() string { return p.Path }
+
+func (p *sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath }
+func (p *sshFxpExtendedPacketHardlink) getPath() string { return p.Oldpath }
+
+// getHandle
+func (p *sshFxpFstatPacket) getHandle() string { return p.Handle }
+func (p *sshFxpFsetstatPacket) getHandle() string { return p.Handle }
+func (p *sshFxpReadPacket) getHandle() string { return p.Handle }
+func (p *sshFxpWritePacket) getHandle() string { return p.Handle }
+func (p *sshFxpReaddirPacket) getHandle() string { return p.Handle }
+func (p *sshFxpClosePacket) getHandle() string { return p.Handle }
+
+// notReadOnly
+func (p *sshFxpWritePacket) notReadOnly() {}
+func (p *sshFxpSetstatPacket) notReadOnly() {}
+func (p *sshFxpFsetstatPacket) notReadOnly() {}
+func (p *sshFxpRemovePacket) notReadOnly() {}
+func (p *sshFxpMkdirPacket) notReadOnly() {}
+func (p *sshFxpRmdirPacket) notReadOnly() {}
+func (p *sshFxpRenamePacket) notReadOnly() {}
+func (p *sshFxpSymlinkPacket) notReadOnly() {}
+func (p *sshFxpExtendedPacketPosixRename) notReadOnly() {}
+func (p *sshFxpExtendedPacketHardlink) notReadOnly() {}
+
+// some packets with ID are missing id()
+func (p *sshFxpDataPacket) id() uint32 { return p.ID }
+func (p *sshFxpStatusPacket) id() uint32 { return p.ID }
+func (p *sshFxpStatResponse) id() uint32 { return p.ID }
+func (p *sshFxpNamePacket) id() uint32 { return p.ID }
+func (p *sshFxpHandlePacket) id() uint32 { return p.ID }
+func (p *StatVFS) id() uint32 { return p.ID }
+func (p *sshFxVersionPacket) id() uint32 { return 0 }
+
+// take raw incoming packet data and build packet objects
+func makePacket(p rxPacket) (requestPacket, error) {
+ var pkt requestPacket
+ switch p.pktType {
+ case sshFxpInit:
+ pkt = &sshFxInitPacket{}
+ case sshFxpLstat:
+ pkt = &sshFxpLstatPacket{}
+ case sshFxpOpen:
+ pkt = &sshFxpOpenPacket{}
+ case sshFxpClose:
+ pkt = &sshFxpClosePacket{}
+ case sshFxpRead:
+ pkt = &sshFxpReadPacket{}
+ case sshFxpWrite:
+ pkt = &sshFxpWritePacket{}
+ case sshFxpFstat:
+ pkt = &sshFxpFstatPacket{}
+ case sshFxpSetstat:
+ pkt = &sshFxpSetstatPacket{}
+ case sshFxpFsetstat:
+ pkt = &sshFxpFsetstatPacket{}
+ case sshFxpOpendir:
+ pkt = &sshFxpOpendirPacket{}
+ case sshFxpReaddir:
+ pkt = &sshFxpReaddirPacket{}
+ case sshFxpRemove:
+ pkt = &sshFxpRemovePacket{}
+ case sshFxpMkdir:
+ pkt = &sshFxpMkdirPacket{}
+ case sshFxpRmdir:
+ pkt = &sshFxpRmdirPacket{}
+ case sshFxpRealpath:
+ pkt = &sshFxpRealpathPacket{}
+ case sshFxpStat:
+ pkt = &sshFxpStatPacket{}
+ case sshFxpRename:
+ pkt = &sshFxpRenamePacket{}
+ case sshFxpReadlink:
+ pkt = &sshFxpReadlinkPacket{}
+ case sshFxpSymlink:
+ pkt = &sshFxpSymlinkPacket{}
+ case sshFxpExtended:
+ pkt = &sshFxpExtendedPacket{}
+ default:
+ return nil, fmt.Errorf("unhandled packet type: %s", p.pktType)
+ }
+ if err := pkt.UnmarshalBinary(p.pktBytes); err != nil {
+ // Return partially unpacked packet to allow callers to return
+ // error messages appropriately with necessary id() method.
+ return pkt, err
+ }
+ return pkt, nil
+}
diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go
new file mode 100644
index 000000000..4059cf8e0
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/packet.go
@@ -0,0 +1,1276 @@
+package sftp
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+var (
+ errLongPacket = errors.New("packet too long")
+ errShortPacket = errors.New("packet too short")
+ errUnknownExtendedPacket = errors.New("unknown extended packet")
+)
+
+const (
+ maxMsgLength = 256 * 1024
+ debugDumpTxPacket = false
+ debugDumpRxPacket = false
+ debugDumpTxPacketBytes = false
+ debugDumpRxPacketBytes = false
+)
+
+func marshalUint32(b []byte, v uint32) []byte {
+ return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+func marshalUint64(b []byte, v uint64) []byte {
+ return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
+}
+
+func marshalString(b []byte, v string) []byte {
+ return append(marshalUint32(b, uint32(len(v))), v...)
+}
+
+func marshalFileInfo(b []byte, fi os.FileInfo) []byte {
+ // attributes variable struct, and also variable per protocol version
+ // spec version 3 attributes:
+ // uint32 flags
+ // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE
+ // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID
+ // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID
+ // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS
+ // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME
+ // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME
+ // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED
+ // string extended_type
+ // string extended_data
+ // ... more extended data (extended_type - extended_data pairs),
+ // so that number of pairs equals extended_count
+
+ flags, fileStat := fileStatFromInfo(fi)
+
+ b = marshalUint32(b, flags)
+ if flags&sshFileXferAttrSize != 0 {
+ b = marshalUint64(b, fileStat.Size)
+ }
+ if flags&sshFileXferAttrUIDGID != 0 {
+ b = marshalUint32(b, fileStat.UID)
+ b = marshalUint32(b, fileStat.GID)
+ }
+ if flags&sshFileXferAttrPermissions != 0 {
+ b = marshalUint32(b, fileStat.Mode)
+ }
+ if flags&sshFileXferAttrACmodTime != 0 {
+ b = marshalUint32(b, fileStat.Atime)
+ b = marshalUint32(b, fileStat.Mtime)
+ }
+
+ return b
+}
+
+func marshalStatus(b []byte, err StatusError) []byte {
+ b = marshalUint32(b, err.Code)
+ b = marshalString(b, err.msg)
+ b = marshalString(b, err.lang)
+ return b
+}
+
+func marshal(b []byte, v interface{}) []byte {
+ if v == nil {
+ return b
+ }
+ switch v := v.(type) {
+ case uint8:
+ return append(b, v)
+ case uint32:
+ return marshalUint32(b, v)
+ case uint64:
+ return marshalUint64(b, v)
+ case string:
+ return marshalString(b, v)
+ case os.FileInfo:
+ return marshalFileInfo(b, v)
+ default:
+ switch d := reflect.ValueOf(v); d.Kind() {
+ case reflect.Struct:
+ for i, n := 0, d.NumField(); i < n; i++ {
+ b = marshal(b, d.Field(i).Interface())
+ }
+ return b
+ case reflect.Slice:
+ for i, n := 0, d.Len(); i < n; i++ {
+ b = marshal(b, d.Index(i).Interface())
+ }
+ return b
+ default:
+ panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v))
+ }
+ }
+}
+
+func unmarshalUint32(b []byte) (uint32, []byte) {
+ v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return v, b[4:]
+}
+
+func unmarshalUint32Safe(b []byte) (uint32, []byte, error) {
+ var v uint32
+ if len(b) < 4 {
+ return 0, nil, errShortPacket
+ }
+ v, b = unmarshalUint32(b)
+ return v, b, nil
+}
+
+func unmarshalUint64(b []byte) (uint64, []byte) {
+ h, b := unmarshalUint32(b)
+ l, b := unmarshalUint32(b)
+ return uint64(h)<<32 | uint64(l), b
+}
+
+func unmarshalUint64Safe(b []byte) (uint64, []byte, error) {
+ var v uint64
+ if len(b) < 8 {
+ return 0, nil, errShortPacket
+ }
+ v, b = unmarshalUint64(b)
+ return v, b, nil
+}
+
+func unmarshalString(b []byte) (string, []byte) {
+ n, b := unmarshalUint32(b)
+ return string(b[:n]), b[n:]
+}
+
+func unmarshalStringSafe(b []byte) (string, []byte, error) {
+ n, b, err := unmarshalUint32Safe(b)
+ if err != nil {
+ return "", nil, err
+ }
+ if int64(n) > int64(len(b)) {
+ return "", nil, errShortPacket
+ }
+ return string(b[:n]), b[n:], nil
+}
+
+func unmarshalAttrs(b []byte) (*FileStat, []byte) {
+ flags, b := unmarshalUint32(b)
+ return unmarshalFileStat(flags, b)
+}
+
+func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) {
+ var fs FileStat
+ if flags&sshFileXferAttrSize == sshFileXferAttrSize {
+ fs.Size, b, _ = unmarshalUint64Safe(b)
+ }
+ if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID {
+ fs.UID, b, _ = unmarshalUint32Safe(b)
+ }
+ if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID {
+ fs.GID, b, _ = unmarshalUint32Safe(b)
+ }
+ if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions {
+ fs.Mode, b, _ = unmarshalUint32Safe(b)
+ }
+ if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime {
+ fs.Atime, b, _ = unmarshalUint32Safe(b)
+ fs.Mtime, b, _ = unmarshalUint32Safe(b)
+ }
+ if flags&sshFileXferAttrExtended == sshFileXferAttrExtended {
+ var count uint32
+ count, b, _ = unmarshalUint32Safe(b)
+ ext := make([]StatExtended, count)
+ for i := uint32(0); i < count; i++ {
+ var typ string
+ var data string
+ typ, b, _ = unmarshalStringSafe(b)
+ data, b, _ = unmarshalStringSafe(b)
+ ext[i] = StatExtended{
+ ExtType: typ,
+ ExtData: data,
+ }
+ }
+ fs.Extended = ext
+ }
+ return &fs, b
+}
+
+func unmarshalStatus(id uint32, data []byte) error {
+ sid, data := unmarshalUint32(data)
+ if sid != id {
+ return &unexpectedIDErr{id, sid}
+ }
+ code, data := unmarshalUint32(data)
+ msg, data, _ := unmarshalStringSafe(data)
+ lang, _, _ := unmarshalStringSafe(data)
+ return &StatusError{
+ Code: code,
+ msg: msg,
+ lang: lang,
+ }
+}
+
+type packetMarshaler interface {
+ marshalPacket() (header, payload []byte, err error)
+}
+
+func marshalPacket(m encoding.BinaryMarshaler) (header, payload []byte, err error) {
+ if m, ok := m.(packetMarshaler); ok {
+ return m.marshalPacket()
+ }
+
+ header, err = m.MarshalBinary()
+ return
+}
+
+// sendPacket marshals m into a length-prefixed sftp packet (per draft-ietf-secsh-filexfer-02) and writes it to w.
+func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
+ header, payload, err := marshalPacket(m)
+ if err != nil {
+ return fmt.Errorf("binary marshaller failed: %w", err)
+ }
+
+ length := len(header) + len(payload) - 4 // subtract the uint32(length) from the start
+ if debugDumpTxPacketBytes {
+ debug("send packet: %s %d bytes %x%x", fxp(header[4]), length, header[5:], payload)
+ } else if debugDumpTxPacket {
+ debug("send packet: %s %d bytes", fxp(header[4]), length)
+ }
+
+ binary.BigEndian.PutUint32(header[:4], uint32(length))
+
+ if _, err := w.Write(header); err != nil {
+ return fmt.Errorf("failed to send packet: %w", err)
+ }
+
+ if len(payload) > 0 {
+ if _, err := w.Write(payload); err != nil {
+ return fmt.Errorf("failed to send packet payload: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// recvPacket reads one length-framed sftp packet from r and returns its type
+// byte and body.  When alloc is non-nil the bytes are read into a page owned
+// by the allocator (keyed by orderID), so the returned slice is only valid
+// until that order's pages are released.
+func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, error) {
+ var b []byte
+ if alloc != nil {
+ b = alloc.GetPage(orderID)
+ } else {
+ b = make([]byte, 4)
+ }
+ // First read only the uint32 length prefix.
+ if _, err := io.ReadFull(r, b[:4]); err != nil {
+ return 0, nil, err
+ }
+ length, _ := unmarshalUint32(b)
+ if length > maxMsgLength {
+ debug("recv packet %d bytes too long", length)
+ return 0, nil, errLongPacket
+ }
+ if length == 0 {
+ // A valid packet always contains at least the type byte.
+ debug("recv packet of 0 bytes too short")
+ return 0, nil, errShortPacket
+ }
+ if alloc == nil {
+ b = make([]byte, length)
+ }
+ // Reuse b for the body; allocator pages are sized to hold maxMsgLength.
+ if _, err := io.ReadFull(r, b[:length]); err != nil {
+ debug("recv packet %d bytes: err %v", length, err)
+ return 0, nil, err
+ }
+ if debugDumpRxPacketBytes {
+ debug("recv packet: %s %d bytes %x", fxp(b[0]), length, b[1:length])
+ } else if debugDumpRxPacket {
+ debug("recv packet: %s %d bytes", fxp(b[0]), length)
+ }
+ // b[0] is the packet type; the remainder is the packet body.
+ return b[0], b[1:length], nil
+}
+
+// extensionPair is one name/data extension entry as exchanged in the
+// INIT/VERSION handshake.
+type extensionPair struct {
+ Name string
+ Data string
+}
+
+// unmarshalExtensionPair decodes one extension entry from b and returns the
+// remaining bytes.
+func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) {
+ var ep extensionPair
+ var err error
+ ep.Name, b, err = unmarshalStringSafe(b)
+ if err != nil {
+ return ep, b, err
+ }
+ ep.Data, b, err = unmarshalStringSafe(b)
+ return ep, b, err
+}
+
+// Here starts the definition of packets along with their MarshalBinary
+// implementations.
+// Manually writing the marshalling logic wins us a lot of time and
+// allocation.
+
+// sshFxInitPacket is the client's SSH_FXP_INIT handshake packet: the protocol
+// version it speaks plus any extensions it advertises.
+type sshFxInitPacket struct {
+ Version uint32
+ Extensions []extensionPair
+}
+
+func (p *sshFxInitPacket) MarshalBinary() ([]byte, error) {
+ // Size the buffer up front; the leading 4 bytes are reserved for the
+ // length field that sendPacket fills in.
+ l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version)
+ for _, e := range p.Extensions {
+ l += 4 + len(e.Name) + 4 + len(e.Data)
+ }
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpInit)
+ b = marshalUint32(b, p.Version)
+
+ for _, e := range p.Extensions {
+ b = marshalString(b, e.Name)
+ b = marshalString(b, e.Data)
+ }
+
+ return b, nil
+}
+
+func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.Version, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ // Extensions occupy the rest of the packet body.
+ for len(b) > 0 {
+ var ep extensionPair
+ ep, b, err = unmarshalExtensionPair(b)
+ if err != nil {
+ return err
+ }
+ p.Extensions = append(p.Extensions, ep)
+ }
+ return nil
+}
+
+// sshFxVersionPacket is the server's SSH_FXP_VERSION reply to INIT: the
+// negotiated protocol version plus the extensions the server supports.
+type sshFxVersionPacket struct {
+ Version uint32
+ Extensions []sshExtensionPair
+}
+
+// sshExtensionPair mirrors extensionPair for the server-side VERSION packet.
+type sshExtensionPair struct {
+ Name, Data string
+}
+
+func (p *sshFxVersionPacket) MarshalBinary() ([]byte, error) {
+ // Leading 4 bytes are reserved for the length field sendPacket fills in.
+ l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version)
+ for _, e := range p.Extensions {
+ l += 4 + len(e.Name) + 4 + len(e.Data)
+ }
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpVersion)
+ b = marshalUint32(b, p.Version)
+
+ for _, e := range p.Extensions {
+ b = marshalString(b, e.Name)
+ b = marshalString(b, e.Data)
+ }
+
+ return b, nil
+}
+
+// marshalIDStringPacket builds the wire form shared by all packets whose body
+// is just a request ID and a single string (path or handle).
+func marshalIDStringPacket(packetType byte, id uint32, str string) ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(str)
+
+ b := make([]byte, 4, l)
+ b = append(b, packetType)
+ b = marshalUint32(b, id)
+ b = marshalString(b, str)
+
+ return b, nil
+}
+
+// unmarshalIDString is the matching decoder: it fills id and str from a
+// packet body of the ID+string shape.
+func unmarshalIDString(b []byte, id *uint32, str *string) error {
+ var err error
+ *id, b, err = unmarshalUint32Safe(b)
+ if err != nil {
+ return err
+ }
+ *str, _, err = unmarshalStringSafe(b)
+ return err
+}
+
+// The following eight packet types (READDIR, OPENDIR, LSTAT, STAT, FSTAT,
+// CLOSE, REMOVE, RMDIR) all carry a request ID plus one string field, so
+// they share marshalIDStringPacket / unmarshalIDString for their encoding.
+type sshFxpReaddirPacket struct {
+ ID uint32
+ Handle string
+}
+
+func (p *sshFxpReaddirPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpReaddirPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpReaddir, p.ID, p.Handle)
+}
+
+func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Handle)
+}
+
+type sshFxpOpendirPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpOpendirPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpOpendirPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpOpendir, p.ID, p.Path)
+}
+
+func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+type sshFxpLstatPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpLstatPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpLstatPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpLstat, p.ID, p.Path)
+}
+
+func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+type sshFxpStatPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpStatPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpStatPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpStat, p.ID, p.Path)
+}
+
+func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+type sshFxpFstatPacket struct {
+ ID uint32
+ Handle string
+}
+
+func (p *sshFxpFstatPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpFstatPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpFstat, p.ID, p.Handle)
+}
+
+func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Handle)
+}
+
+type sshFxpClosePacket struct {
+ ID uint32
+ Handle string
+}
+
+func (p *sshFxpClosePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpClosePacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpClose, p.ID, p.Handle)
+}
+
+func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Handle)
+}
+
+type sshFxpRemovePacket struct {
+ ID uint32
+ Filename string
+}
+
+func (p *sshFxpRemovePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpRemovePacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpRemove, p.ID, p.Filename)
+}
+
+func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Filename)
+}
+
+type sshFxpRmdirPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpRmdirPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpRmdirPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpRmdir, p.ID, p.Path)
+}
+
+func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+// sshFxpSymlinkPacket is an SSH_FXP_SYMLINK request.
+// NOTE(review): the paths are marshalled in (Targetpath, Linkpath) order —
+// presumably to match OpenSSH's well-known reversed SYMLINK argument order
+// rather than the draft standard; confirm against the peer implementation.
+type sshFxpSymlinkPacket struct {
+ ID uint32
+ Targetpath string
+ Linkpath string
+}
+
+func (p *sshFxpSymlinkPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Targetpath) +
+ 4 + len(p.Linkpath)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpSymlink)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Targetpath)
+ b = marshalString(b, p.Linkpath)
+
+ return b, nil
+}
+
+func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sshFxpHardlinkPacket is the client-side hardlink@openssh.com extended
+// request, encoded as SSH_FXP_EXTENDED with the extension name first.
+type sshFxpHardlinkPacket struct {
+ ID uint32
+ Oldpath string
+ Newpath string
+}
+
+func (p *sshFxpHardlinkPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpHardlinkPacket) MarshalBinary() ([]byte, error) {
+ const ext = "hardlink@openssh.com"
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(ext) +
+ 4 + len(p.Oldpath) +
+ 4 + len(p.Newpath)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpExtended)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, ext)
+ b = marshalString(b, p.Oldpath)
+ b = marshalString(b, p.Newpath)
+
+ return b, nil
+}
+
+// sshFxpReadlinkPacket (SSH_FXP_READLINK) and sshFxpRealpathPacket
+// (SSH_FXP_REALPATH) are both ID+path packets sharing the common helpers.
+type sshFxpReadlinkPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpReadlinkPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpReadlink, p.ID, p.Path)
+}
+
+func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+type sshFxpRealpathPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpRealpathPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpRealpathPacket) MarshalBinary() ([]byte, error) {
+ return marshalIDStringPacket(sshFxpRealpath, p.ID, p.Path)
+}
+
+func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error {
+ return unmarshalIDString(b, &p.ID, &p.Path)
+}
+
+// sshFxpNameAttr is a single entry of an SSH_FXP_NAME reply: a file name,
+// its long (ls-style) form, and its attribute values.
+type sshFxpNameAttr struct {
+ Name string
+ LongName string
+ Attrs []interface{}
+}
+
+func (p *sshFxpNameAttr) MarshalBinary() ([]byte, error) {
+ var b []byte
+ b = marshalString(b, p.Name)
+ b = marshalString(b, p.LongName)
+ for _, attr := range p.Attrs {
+ b = marshal(b, attr)
+ }
+ return b, nil
+}
+
+// sshFxpNamePacket is an SSH_FXP_NAME reply carrying one or more name/attr
+// entries (e.g. the response to READDIR or REALPATH).
+type sshFxpNamePacket struct {
+ ID uint32
+ NameAttrs []*sshFxpNameAttr
+}
+
+// marshalPacket emits the fixed header separately from the concatenated
+// entries so sendPacket can write them without an extra copy.
+func (p *sshFxpNamePacket) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpName)
+ b = marshalUint32(b, p.ID)
+ b = marshalUint32(b, uint32(len(p.NameAttrs)))
+
+ var payload []byte
+ for _, na := range p.NameAttrs {
+ ab, err := na.MarshalBinary()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ payload = append(payload, ab...)
+ }
+
+ return b, payload, nil
+}
+
+func (p *sshFxpNamePacket) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+// sshFxpOpenPacket is an SSH_FXP_OPEN request.  Pflags carries the sshFxf*
+// open-mode bits; the trailing attribute Flags field is carried on the wire
+// but otherwise unused here.
+type sshFxpOpenPacket struct {
+ ID uint32
+ Path string
+ Pflags uint32
+ Flags uint32 // ignored
+}
+
+func (p *sshFxpOpenPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Path) +
+ 4 + 4
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpOpen)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Path)
+ b = marshalUint32(b, p.Pflags)
+ b = marshalUint32(b, p.Flags)
+
+ return b, nil
+}
+
+func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sshFxpReadPacket is an SSH_FXP_READ request: read up to Len bytes from the
+// open Handle starting at Offset.
+type sshFxpReadPacket struct {
+ ID uint32
+ Len uint32
+ Offset uint64
+ Handle string
+}
+
+func (p *sshFxpReadPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpReadPacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Handle) +
+ 8 + 4 // uint64 + uint32
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpRead)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Handle)
+ b = marshalUint64(b, p.Offset)
+ b = marshalUint32(b, p.Len)
+
+ return b, nil
+}
+
+func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil {
+ return err
+ } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// We need allocate bigger slices with extra capacity to avoid a re-allocation in sshFxpDataPacket.MarshalBinary
+// So, we need: uint32(length) + byte(type) + uint32(id) + uint32(data_length)
+const dataHeaderLen = 4 + 1 + 4 + 4
+
+// getDataSlice returns a buffer for the data of a read reply, capped at
+// maxTxPacket bytes.  With an allocator it reuses a page (whose capacity is
+// maxMsgLength); otherwise it allocates with dataHeaderLen spare capacity so
+// sshFxpDataPacket.MarshalBinary need not reallocate.
+func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte {
+ dataLen := p.Len
+ if dataLen > maxTxPacket {
+ dataLen = maxTxPacket
+ }
+
+ if alloc != nil {
+ // GetPage returns a slice with capacity = maxMsgLength this is enough to avoid new allocations in
+ // sshFxpDataPacket.MarshalBinary
+ return alloc.GetPage(orderID)[:dataLen]
+ }
+
+ // allocate with extra space for the header
+ return make([]byte, dataLen, dataLen+dataHeaderLen)
+}
+
+// sshFxpRenamePacket is a standard SSH_FXP_RENAME request.
+type sshFxpRenamePacket struct {
+ ID uint32
+ Oldpath string
+ Newpath string
+}
+
+func (p *sshFxpRenamePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpRenamePacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Oldpath) +
+ 4 + len(p.Newpath)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpRename)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Oldpath)
+ b = marshalString(b, p.Newpath)
+
+ return b, nil
+}
+
+func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sshFxpPosixRenamePacket is the posix-rename@openssh.com extended request,
+// which (unlike RENAME) atomically replaces an existing target.
+type sshFxpPosixRenamePacket struct {
+ ID uint32
+ Oldpath string
+ Newpath string
+}
+
+func (p *sshFxpPosixRenamePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) {
+ const ext = "posix-rename@openssh.com"
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(ext) +
+ 4 + len(p.Oldpath) +
+ 4 + len(p.Newpath)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpExtended)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, ext)
+ b = marshalString(b, p.Oldpath)
+ b = marshalString(b, p.Newpath)
+
+ return b, nil
+}
+
+// sshFxpWritePacket is an SSH_FXP_WRITE request: write Data (Length bytes)
+// to the open Handle at Offset.
+type sshFxpWritePacket struct {
+ ID uint32
+ Length uint32
+ Offset uint64
+ Handle string
+ Data []byte
+}
+
+func (p *sshFxpWritePacket) id() uint32 { return p.ID }
+
+// marshalPacket returns the fixed header and p.Data as the payload so the
+// (possibly large) data buffer is never copied into the header slice.
+func (p *sshFxpWritePacket) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Handle) +
+ 8 + // uint64
+ 4
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpWrite)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Handle)
+ b = marshalUint64(b, p.Offset)
+ b = marshalUint32(b, p.Length)
+
+ return b, p.Data, nil
+}
+
+func (p *sshFxpWritePacket) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil {
+ return err
+ } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if uint32(len(b)) < p.Length {
+ return errShortPacket
+ }
+
+ // Data aliases the incoming buffer; no copy is made.
+ p.Data = b[:p.Length]
+ return nil
+}
+
+// sshFxpMkdirPacket is an SSH_FXP_MKDIR request; the attribute Flags field is
+// carried on the wire but otherwise unused here.
+type sshFxpMkdirPacket struct {
+ ID uint32
+ Flags uint32 // ignored
+ Path string
+}
+
+func (p *sshFxpMkdirPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpMkdirPacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Path) +
+ 4 // uint32
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpMkdir)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Path)
+ b = marshalUint32(b, p.Flags)
+
+ return b, nil
+}
+
+func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sshFxpSetstatPacket (SSH_FXP_SETSTAT, by path) and sshFxpFsetstatPacket
+// (SSH_FXP_FSETSTAT, by handle) change file attributes.  On the client side
+// Attrs holds a marshallable attributes value; after UnmarshalBinary on the
+// server side it holds the raw attribute bytes still to be parsed.
+type sshFxpSetstatPacket struct {
+ ID uint32
+ Flags uint32
+ Path string
+ Attrs interface{}
+}
+
+type sshFxpFsetstatPacket struct {
+ ID uint32
+ Flags uint32
+ Handle string
+ Attrs interface{}
+}
+
+func (p *sshFxpSetstatPacket) id() uint32 { return p.ID }
+func (p *sshFxpFsetstatPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpSetstatPacket) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Path) +
+ 4 // uint32
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpSetstat)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Path)
+ b = marshalUint32(b, p.Flags)
+
+ // Attributes are marshalled reflectively into a separate payload.
+ payload := marshal(nil, p.Attrs)
+
+ return b, payload, nil
+}
+
+func (p *sshFxpSetstatPacket) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+func (p *sshFxpFsetstatPacket) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Handle) +
+ 4 // uint32
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpFsetstat)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Handle)
+ b = marshalUint32(b, p.Flags)
+
+ payload := marshal(nil, p.Attrs)
+
+ return b, payload, nil
+}
+
+func (p *sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ // Remaining bytes are the attribute blob, interpreted later via Flags.
+ p.Attrs = b
+ return nil
+}
+
+func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ }
+ p.Attrs = b
+ return nil
+}
+
+// sshFxpHandlePacket is an SSH_FXP_HANDLE reply returning an opaque handle
+// for a freshly-opened file or directory.
+type sshFxpHandlePacket struct {
+ ID uint32
+ Handle string
+}
+
+func (p *sshFxpHandlePacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(p.Handle)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpHandle)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, p.Handle)
+
+ return b, nil
+}
+
+// sshFxpStatusPacket is an SSH_FXP_STATUS reply; the embedded StatusError
+// supplies the code, message and language tag.
+type sshFxpStatusPacket struct {
+ ID uint32
+ StatusError
+}
+
+func (p *sshFxpStatusPacket) MarshalBinary() ([]byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 +
+ 4 + len(p.StatusError.msg) +
+ 4 + len(p.StatusError.lang)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpStatus)
+ b = marshalUint32(b, p.ID)
+ b = marshalStatus(b, p.StatusError)
+
+ return b, nil
+}
+
+// sshFxpDataPacket is an SSH_FXP_DATA reply carrying Length bytes of Data.
+type sshFxpDataPacket struct {
+ ID uint32
+ Length uint32
+ Data []byte
+}
+
+// marshalPacket returns the fixed header with Data as a separate payload so
+// sendPacket can write both without concatenating them.
+func (p *sshFxpDataPacket) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpData)
+ b = marshalUint32(b, p.ID)
+ b = marshalUint32(b, p.Length)
+
+ return b, p.Data, nil
+}
+
+// MarshalBinary encodes the receiver into a binary form and returns the result.
+// To avoid a new allocation the Data slice must have a capacity >= Length + 9
+//
+// This is hand-coded rather than just append(header, payload...),
+// in order to try and reuse the r.Data backing store in the packet.
+func (p *sshFxpDataPacket) MarshalBinary() ([]byte, error) {
+ // Extend Data by dataHeaderLen, shift the payload right to make room at
+ // the front, then write the header into the vacated prefix.
+ b := append(p.Data, make([]byte, dataHeaderLen)...)
+ copy(b[dataHeaderLen:], p.Data[:p.Length])
+ // b[0:4] will be overwritten with the length in sendPacket
+ b[4] = sshFxpData
+ binary.BigEndian.PutUint32(b[5:9], p.ID)
+ binary.BigEndian.PutUint32(b[9:13], p.Length)
+ return b, nil
+}
+
+func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if uint32(len(b)) < p.Length {
+ return errShortPacket
+ }
+
+ // Data aliases the incoming buffer; no copy is made.
+ p.Data = b[:p.Length]
+ return nil
+}
+
+// sshFxpStatvfsPacket is the client-side statvfs@openssh.com extended
+// request for filesystem statistics on Path.
+type sshFxpStatvfsPacket struct {
+ ID uint32
+ Path string
+}
+
+func (p *sshFxpStatvfsPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) {
+ const ext = "statvfs@openssh.com"
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(ext) +
+ 4 + len(p.Path)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpExtended)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, ext)
+ b = marshalString(b, p.Path)
+
+ return b, nil
+}
+
+// A StatVFS contains statistics about a filesystem.
+type StatVFS struct {
+ ID uint32
+ Bsize uint64 /* file system block size */
+ Frsize uint64 /* fundamental fs block size */
+ Blocks uint64 /* number of blocks (unit f_frsize) */
+ Bfree uint64 /* free blocks in file system */
+ Bavail uint64 /* free blocks for non-root */
+ Files uint64 /* total file inodes */
+ Ffree uint64 /* free file inodes */
+ Favail uint64 /* free file inodes for to non-root */
+ Fsid uint64 /* file system id */
+ Flag uint64 /* bit mask of f_flag values */
+ Namemax uint64 /* maximum filename length */
+}
+
+// TotalSpace calculates the amount of total space in a filesystem.
+func (p *StatVFS) TotalSpace() uint64 {
+ return p.Frsize * p.Blocks
+}
+
+// FreeSpace calculates the amount of free space in a filesystem.
+func (p *StatVFS) FreeSpace() uint64 {
+ return p.Frsize * p.Bfree
+}
+
+// marshalPacket converts to ssh_FXP_EXTENDED_REPLY packet binary format
+func (p *StatVFS) marshalPacket() ([]byte, []byte, error) {
+ header := []byte{0, 0, 0, 0, sshFxpExtendedReply}
+
+ // binary.Write emits every field of the struct in order, big-endian,
+ // so the leading ID field doubles as the reply's request-ID word.
+ var buf bytes.Buffer
+ err := binary.Write(&buf, binary.BigEndian, p)
+
+ return header, buf.Bytes(), err
+}
+
+// MarshalBinary encodes the StatVFS as an SSH_FXP_EXTENDED_REPLY packet.
+func (p *StatVFS) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+// sshFxpFsyncPacket is the client-side fsync@openssh.com extended request,
+// asking the server to flush the open Handle to stable storage.
+type sshFxpFsyncPacket struct {
+ ID uint32
+ Handle string
+}
+
+func (p *sshFxpFsyncPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpFsyncPacket) MarshalBinary() ([]byte, error) {
+ const ext = "fsync@openssh.com"
+ l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+ 4 + len(ext) +
+ 4 + len(p.Handle)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpExtended)
+ b = marshalUint32(b, p.ID)
+ b = marshalString(b, ext)
+ b = marshalString(b, p.Handle)
+
+ return b, nil
+}
+
+// sshFxpExtendedPacket is the generic server-side SSH_FXP_EXTENDED wrapper.
+// The extended-request name selects a SpecificPacket implementation, which
+// is then unmarshalled from the same bytes and performs the response.
+type sshFxpExtendedPacket struct {
+ ID uint32
+ ExtendedRequest string
+ SpecificPacket interface {
+ serverRespondablePacket
+ readonly() bool
+ }
+}
+
+func (p *sshFxpExtendedPacket) id() uint32 { return p.ID }
+
+// readonly reports whether the wrapped request is safe on a read-only server.
+// An unrecognized request (nil SpecificPacket) is treated as read-only since
+// it only ever yields a status reply.
+func (p *sshFxpExtendedPacket) readonly() bool {
+ if p.SpecificPacket == nil {
+ return true
+ }
+ return p.SpecificPacket.readonly()
+}
+
+func (p *sshFxpExtendedPacket) respond(svr *Server) responsePacket {
+ if p.SpecificPacket == nil {
+ return statusFromError(p.ID, nil)
+ }
+ return p.SpecificPacket.respond(svr)
+}
+
+func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error {
+ var err error
+ bOrig := b
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+
+ // specific unmarshalling
+ switch p.ExtendedRequest {
+ case "statvfs@openssh.com":
+ p.SpecificPacket = &sshFxpExtendedPacketStatVFS{}
+ case "posix-rename@openssh.com":
+ p.SpecificPacket = &sshFxpExtendedPacketPosixRename{}
+ case "hardlink@openssh.com":
+ p.SpecificPacket = &sshFxpExtendedPacketHardlink{}
+ default:
+ // Report the request name itself.  SpecificPacket is always nil on
+ // this branch, so formatting it (as before) only ever printed
+ // "packet type <nil>", hiding which extension was unsupported.
+ return fmt.Errorf("packet type %q: %w", p.ExtendedRequest, errUnknownExtendedPacket)
+ }
+
+ // Each specific packet re-parses the ID and request name itself, so hand
+ // it the original, unconsumed buffer.
+ return p.SpecificPacket.UnmarshalBinary(bOrig)
+}
+
+// sshFxpExtendedPacketStatVFS is the server-side parse of the
+// statvfs@openssh.com extended request.
+type sshFxpExtendedPacketStatVFS struct {
+ ID uint32
+ ExtendedRequest string
+ Path string
+}
+
+func (p *sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID }
+func (p *sshFxpExtendedPacketStatVFS) readonly() bool { return true }
+func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Path, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sshFxpExtendedPacketPosixRename is the server-side parse of the
+// posix-rename@openssh.com extended request.
+type sshFxpExtendedPacketPosixRename struct {
+ ID uint32
+ ExtendedRequest string
+ Oldpath string
+ Newpath string
+}
+
+func (p *sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID }
+func (p *sshFxpExtendedPacketPosixRename) readonly() bool { return false }
+func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// respond performs the rename on the server's local filesystem and maps the
+// resulting error (or nil) to a status reply.
+func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket {
+ err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath))
+ return statusFromError(p.ID, err)
+}
+
+// sshFxpExtendedPacketHardlink is the server-side parse of the
+// hardlink@openssh.com extended request.
+type sshFxpExtendedPacketHardlink struct {
+ ID uint32
+ ExtendedRequest string
+ Oldpath string
+ Newpath string
+}
+
+// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL
+func (p *sshFxpExtendedPacketHardlink) id() uint32 { return p.ID }
+// NOTE(review): readonly() returning true looks suspicious — respond()
+// creates a hardlink via os.Link, which modifies the filesystem, so a
+// read-only server would still permit it.  Confirm against upstream intent
+// before changing, as siblings like posix-rename return false here.
+func (p *sshFxpExtendedPacketHardlink) readonly() bool { return true }
+func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error {
+ var err error
+ if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+ return err
+ } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
+ return err
+ } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// respond creates the hardlink locally and maps the error to a status reply.
+func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket {
+ err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath))
+ return statusFromError(p.ID, err)
+}
diff --git a/vendor/github.com/pkg/sftp/pool.go b/vendor/github.com/pkg/sftp/pool.go
new file mode 100644
index 000000000..361262906
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/pool.go
@@ -0,0 +1,79 @@
+package sftp
+
+// bufPool provides a pool of byte-slices to be reused in various parts of the package.
+// It is safe to use concurrently through a pointer.
+type bufPool struct {
+ ch chan []byte // buffered channel acting as the free list
+ blen int // length every returned buffer is resliced to
+}
+
+// newBufPool returns a pool that hands out bufLen-byte buffers and retains at
+// most depth of them for reuse.
+func newBufPool(depth, bufLen int) *bufPool {
+ return &bufPool{
+ ch: make(chan []byte, depth),
+ blen: bufLen,
+ }
+}
+
+// Get returns a buffer of length p.blen, reusing a pooled one when possible
+// and allocating otherwise.  It panics if the pool was built with a
+// non-positive buffer length, since that is a programming error.
+func (p *bufPool) Get() []byte {
+ if p.blen <= 0 {
+ panic("bufPool: new buffer creation length must be greater than zero")
+ }
+
+ for {
+ select {
+ case b := <-p.ch:
+ if cap(b) < p.blen {
+ // just in case: throw away any buffer with insufficient capacity.
+ continue
+ }
+
+ return b[:p.blen]
+
+ default:
+ // Pool empty: fall back to a fresh allocation.
+ return make([]byte, p.blen)
+ }
+ }
+}
+
+// Put offers b back to the pool.  A nil receiver, an unsuitably-sized buffer,
+// or a full pool all cause the buffer to be silently dropped.
+func (p *bufPool) Put(b []byte) {
+ if p == nil {
+ // functional default: no reuse.
+ return
+ }
+
+ if cap(b) < p.blen || cap(b) > p.blen*2 {
+ // DO NOT reuse buffers with insufficient capacity.
+ // This could cause panics when resizing to p.blen.
+
+ // DO NOT reuse buffers with excessive capacity.
+ // This could cause memory leaks.
+ return
+ }
+
+ // Non-blocking send: drop the buffer if the pool is already full.
+ select {
+ case p.ch <- b:
+ default:
+ }
+}
+
+// resChanPool recycles the single-slot result channels used to deliver
+// request responses, avoiding a channel allocation per request.
+type resChanPool chan chan result
+
+func newResChanPool(depth int) resChanPool {
+ return make(chan chan result, depth)
+}
+
+// Get returns a pooled channel or, when the pool is empty, a fresh one with
+// capacity 1 so a send never blocks the responder.
+func (p resChanPool) Get() chan result {
+ select {
+ case ch := <-p:
+ return ch
+ default:
+ return make(chan result, 1)
+ }
+}
+
+// Put offers ch back to the pool, dropping it if the pool is full.
+func (p resChanPool) Put(ch chan result) {
+ select {
+ case p <- ch:
+ default:
+ }
+}
diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go
new file mode 100644
index 000000000..b695528fd
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/release.go
@@ -0,0 +1,5 @@
+// +build !debug
+
+package sftp
+
+// debug is a no-op in non-debug builds; the "debug" build tag swaps in a
+// logging implementation.
+func debug(fmt string, args ...interface{}) {}
diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go
new file mode 100644
index 000000000..b5c95b4ad
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-attrs.go
@@ -0,0 +1,63 @@
+package sftp
+
+// Methods on the Request object to make working with the Flags bitmasks and
+// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write
+// request and AttrFlags() and Attributes() when working with SetStat requests.
+import "os"
+
+// FileOpenFlags defines Open and Write Flags. Correlate directly with os.OpenFile flags
+// (https://golang.org/pkg/os/#pkg-constants).
+type FileOpenFlags struct {
+ Read, Write, Append, Creat, Trunc, Excl bool
+}
+
+// newFileOpenFlags expands the sshFxf* open-flag bitmap into booleans.
+func newFileOpenFlags(flags uint32) FileOpenFlags {
+ return FileOpenFlags{
+ Read: flags&sshFxfRead != 0,
+ Write: flags&sshFxfWrite != 0,
+ Append: flags&sshFxfAppend != 0,
+ Creat: flags&sshFxfCreat != 0,
+ Trunc: flags&sshFxfTrunc != 0,
+ Excl: flags&sshFxfExcl != 0,
+ }
+}
+
+// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values,
+// into a FileOpenFlags struct with booleans set for flags set in bitmap.
+func (r *Request) Pflags() FileOpenFlags {
+ return newFileOpenFlags(r.Flags)
+}
+
+// FileAttrFlags that indicate whether SFTP file attributes were passed. When a flag is
+// true the corresponding attribute should be available from the FileStat
+// object returned by Attributes method. Used with SetStat.
+type FileAttrFlags struct {
+ Size, UidGid, Permissions, Acmodtime bool
+}
+
+// newFileAttrFlags expands the sshFileXferAttr* bitmap into booleans.
+func newFileAttrFlags(flags uint32) FileAttrFlags {
+ return FileAttrFlags{
+ Size: (flags & sshFileXferAttrSize) != 0,
+ UidGid: (flags & sshFileXferAttrUIDGID) != 0,
+ Permissions: (flags & sshFileXferAttrPermissions) != 0,
+ Acmodtime: (flags & sshFileXferAttrACmodTime) != 0,
+ }
+}
+
+// AttrFlags returns a FileAttrFlags boolean struct based on the
+// bitmap/uint32 file attribute flags from the SFTP packet.
+func (r *Request) AttrFlags() FileAttrFlags {
+ return newFileAttrFlags(r.Flags)
+}
+
+// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode
+func (a FileStat) FileMode() os.FileMode {
+ return os.FileMode(a.Mode)
+}
+
+// Attributes parses file attributes byte blob and return them in a
+// FileStat object.  Parse errors are ignored; missing fields stay zero.
+func (r *Request) Attributes() *FileStat {
+ fs, _ := unmarshalFileStat(r.Flags, r.Attrs)
+ return fs
+}
diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go
new file mode 100644
index 000000000..6505b5c74
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-errors.go
@@ -0,0 +1,54 @@
+package sftp
+
+// fxerr wraps an SSH_FXP_STATUS code as a Go error value.
+type fxerr uint32
+
+// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more
+// direct control of the errors being sent vs. letting the library work them
+// out from the standard os/io errors.
+const (
+ ErrSSHFxOk = fxerr(sshFxOk)
+ ErrSSHFxEOF = fxerr(sshFxEOF)
+ ErrSSHFxNoSuchFile = fxerr(sshFxNoSuchFile)
+ ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied)
+ ErrSSHFxFailure = fxerr(sshFxFailure)
+ ErrSSHFxBadMessage = fxerr(sshFxBadMessage)
+ ErrSSHFxNoConnection = fxerr(sshFxNoConnection)
+ ErrSSHFxConnectionLost = fxerr(sshFxConnectionLost)
+ ErrSSHFxOpUnsupported = fxerr(sshFxOPUnsupported)
+)
+
+// Deprecated error types, these are aliases for the new ones, please use the new ones directly
+const (
+ ErrSshFxOk = ErrSSHFxOk
+ ErrSshFxEof = ErrSSHFxEOF
+ ErrSshFxNoSuchFile = ErrSSHFxNoSuchFile
+ ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied
+ ErrSshFxFailure = ErrSSHFxFailure
+ ErrSshFxBadMessage = ErrSSHFxBadMessage
+ ErrSshFxNoConnection = ErrSSHFxNoConnection
+ ErrSshFxConnectionLost = ErrSSHFxConnectionLost
+ ErrSshFxOpUnsupported = ErrSSHFxOpUnsupported
+)
+
+// Error returns a short human-readable description of the status code.
+// Unlisted codes (including ErrSSHFxFailure) fall through to "failure".
+func (e fxerr) Error() string {
+ switch e {
+ case ErrSSHFxOk:
+ return "OK"
+ case ErrSSHFxEOF:
+ return "EOF"
+ case ErrSSHFxNoSuchFile:
+ return "no such file"
+ case ErrSSHFxPermissionDenied:
+ return "permission denied"
+ case ErrSSHFxBadMessage:
+ return "bad message"
+ case ErrSSHFxNoConnection:
+ return "no connection"
+ case ErrSSHFxConnectionLost:
+ return "connection lost"
+ case ErrSSHFxOpUnsupported:
+ return "operation unsupported"
+ default:
+ return "failure"
+ }
+}
diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go
new file mode 100644
index 000000000..ba22bcd0f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-example.go
@@ -0,0 +1,666 @@
+package sftp
+
+// This serves as an example of how to implement the request server handler as
+// well as a dummy backend for testing. It implements an in-memory backend that
+// works as a very simple filesystem with simple flat key-value lookup system.
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+)
+
+const maxSymlinkFollows = 5
+
+var errTooManySymlinks = errors.New("too many symbolic links")
+
+// InMemHandler returns a Handlers object with the test handlers.
+func InMemHandler() Handlers {
+ root := &root{
+ rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true},
+ files: make(map[string]*memFile),
+ }
+ return Handlers{root, root, root, root}
+}
+
+// Example Handlers
+func (fs *root) Fileread(r *Request) (io.ReaderAt, error) {
+ flags := r.Pflags()
+ if !flags.Read {
+ // sanity check
+ return nil, os.ErrInvalid
+ }
+
+ return fs.OpenFile(r)
+}
+
+func (fs *root) Filewrite(r *Request) (io.WriterAt, error) {
+ flags := r.Pflags()
+ if !flags.Write {
+ // sanity check
+ return nil, os.ErrInvalid
+ }
+
+ return fs.OpenFile(r)
+}
+
+func (fs *root) OpenFile(r *Request) (WriterAtReaderAt, error) {
+ if fs.mockErr != nil {
+ return nil, fs.mockErr
+ }
+ _ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ return fs.openfile(r.Filepath, r.Flags)
+}
+
+func (fs *root) putfile(pathname string, file *memFile) error {
+ pathname, err := fs.canonName(pathname)
+ if err != nil {
+ return err
+ }
+
+ if !strings.HasPrefix(pathname, "/") {
+ return os.ErrInvalid
+ }
+
+ if _, err := fs.lfetch(pathname); err != os.ErrNotExist {
+ return os.ErrExist
+ }
+
+ file.name = pathname
+ fs.files[pathname] = file
+
+ return nil
+}
+
+func (fs *root) openfile(pathname string, flags uint32) (*memFile, error) {
+ pflags := newFileOpenFlags(flags)
+
+ file, err := fs.fetch(pathname)
+ if err == os.ErrNotExist {
+ if !pflags.Creat {
+ return nil, os.ErrNotExist
+ }
+
+ var count int
+ // You can create files through dangling symlinks.
+ link, err := fs.lfetch(pathname)
+ for err == nil && link.symlink != "" {
+ if pflags.Excl {
+ // unless you also passed in O_EXCL
+ return nil, os.ErrInvalid
+ }
+
+ if count++; count > maxSymlinkFollows {
+ return nil, errTooManySymlinks
+ }
+
+ pathname = link.symlink
+ link, err = fs.lfetch(pathname)
+ }
+
+ file := &memFile{
+ modtime: time.Now(),
+ }
+
+ if err := fs.putfile(pathname, file); err != nil {
+ return nil, err
+ }
+
+ return file, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if pflags.Creat && pflags.Excl {
+ return nil, os.ErrExist
+ }
+
+ if file.IsDir() {
+ return nil, os.ErrInvalid
+ }
+
+ if pflags.Trunc {
+ if err := file.Truncate(0); err != nil {
+ return nil, err
+ }
+ }
+
+ return file, nil
+}
+
+func (fs *root) Filecmd(r *Request) error {
+ if fs.mockErr != nil {
+ return fs.mockErr
+ }
+ _ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ switch r.Method {
+ case "Setstat":
+ file, err := fs.openfile(r.Filepath, sshFxfWrite)
+ if err != nil {
+ return err
+ }
+
+ if r.AttrFlags().Size {
+ return file.Truncate(int64(r.Attributes().Size))
+ }
+
+ return nil
+
+ case "Rename":
+ // SFTP-v2: "It is an error if there already exists a file with the name specified by newpath."
+ // This varies from the POSIX specification, which allows limited replacement of target files.
+ if fs.exists(r.Target) {
+ return os.ErrExist
+ }
+
+ return fs.rename(r.Filepath, r.Target)
+
+ case "Rmdir":
+ return fs.rmdir(r.Filepath)
+
+ case "Remove":
+ // IEEE 1003.1 remove explicitly can unlink files and remove empty directories.
+ // We use instead here the semantics of unlink, which is allowed to be restricted against directories.
+ return fs.unlink(r.Filepath)
+
+ case "Mkdir":
+ return fs.mkdir(r.Filepath)
+
+ case "Link":
+ return fs.link(r.Filepath, r.Target)
+
+ case "Symlink":
+ // NOTE: r.Filepath is the target, and r.Target is the linkpath.
+ return fs.symlink(r.Filepath, r.Target)
+ }
+
+ return errors.New("unsupported")
+}
+
+func (fs *root) rename(oldpath, newpath string) error {
+ file, err := fs.lfetch(oldpath)
+ if err != nil {
+ return err
+ }
+
+ newpath, err = fs.canonName(newpath)
+ if err != nil {
+ return err
+ }
+
+ if !strings.HasPrefix(newpath, "/") {
+ return os.ErrInvalid
+ }
+
+ target, err := fs.lfetch(newpath)
+ if err != os.ErrNotExist {
+ if target == file {
+ // IEEE 1003.1: if oldpath and newpath are the same directory entry,
+ // then return no error, and perform no further action.
+ return nil
+ }
+
+ switch {
+ case file.IsDir():
+ // IEEE 1003.1: if oldpath is a directory, and newpath exists,
+ // then newpath must be a directory, and empty.
+ // It is to be removed prior to rename.
+ if err := fs.rmdir(newpath); err != nil {
+ return err
+ }
+
+ case target.IsDir():
+ // IEEE 1003.1: if oldpath is not a directory, and newpath exists,
+ // then newpath may not be a directory.
+ return syscall.EISDIR
+ }
+ }
+
+ fs.files[newpath] = file
+
+ if file.IsDir() {
+ dirprefix := file.name + "/"
+
+ for name, file := range fs.files {
+ if strings.HasPrefix(name, dirprefix) {
+ newname := path.Join(newpath, strings.TrimPrefix(name, dirprefix))
+
+ fs.files[newname] = file
+ file.name = newname
+ delete(fs.files, name)
+ }
+ }
+ }
+
+ file.name = newpath
+ delete(fs.files, oldpath)
+
+ return nil
+}
+
+func (fs *root) PosixRename(r *Request) error {
+ if fs.mockErr != nil {
+ return fs.mockErr
+ }
+ _ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ return fs.rename(r.Filepath, r.Target)
+}
+
+func (fs *root) StatVFS(r *Request) (*StatVFS, error) {
+ if fs.mockErr != nil {
+ return nil, fs.mockErr
+ }
+
+ return getStatVFSForPath(r.Filepath)
+}
+
+func (fs *root) mkdir(pathname string) error {
+ dir := &memFile{
+ modtime: time.Now(),
+ isdir: true,
+ }
+
+ return fs.putfile(pathname, dir)
+}
+
+func (fs *root) rmdir(pathname string) error {
+ // IEEE 1003.1: If pathname is a symlink, then rmdir should fail with ENOTDIR.
+ dir, err := fs.lfetch(pathname)
+ if err != nil {
+ return err
+ }
+
+ if !dir.IsDir() {
+ return syscall.ENOTDIR
+ }
+
+ // use the dir‘s internal name not the pathname we passed in.
+ // the dir.name is always the canonical name of a directory.
+ pathname = dir.name
+
+ for name := range fs.files {
+ if path.Dir(name) == pathname {
+ return errors.New("directory not empty")
+ }
+ }
+
+ delete(fs.files, pathname)
+
+ return nil
+}
+
+func (fs *root) link(oldpath, newpath string) error {
+ file, err := fs.lfetch(oldpath)
+ if err != nil {
+ return err
+ }
+
+ if file.IsDir() {
+ return errors.New("hard link not allowed for directory")
+ }
+
+ return fs.putfile(newpath, file)
+}
+
+// symlink() creates a symbolic link named `linkpath` which contains the string `target`.
+// NOTE! This would be called with `symlink(req.Filepath, req.Target)` due to different semantics.
+func (fs *root) symlink(target, linkpath string) error {
+ link := &memFile{
+ modtime: time.Now(),
+ symlink: target,
+ }
+
+ return fs.putfile(linkpath, link)
+}
+
+func (fs *root) unlink(pathname string) error {
+ // does not follow symlinks!
+ file, err := fs.lfetch(pathname)
+ if err != nil {
+ return err
+ }
+
+ if file.IsDir() {
+ // IEEE 1003.1: implementations may opt out of allowing the unlinking of directories.
+ // SFTP-v2: SSH_FXP_REMOVE may not remove directories.
+ return os.ErrInvalid
+ }
+
+ // DO NOT use the file’s internal name.
+ // because of hard-links files cannot have a single canonical name.
+ delete(fs.files, pathname)
+
+ return nil
+}
+
+type listerat []os.FileInfo
+
+// Modeled after strings.Reader's ReadAt() implementation
+func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
+ var n int
+ if offset >= int64(len(f)) {
+ return 0, io.EOF
+ }
+ n = copy(ls, f[offset:])
+ if n < len(ls) {
+ return n, io.EOF
+ }
+ return n, nil
+}
+
+func (fs *root) Filelist(r *Request) (ListerAt, error) {
+ if fs.mockErr != nil {
+ return nil, fs.mockErr
+ }
+ _ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ switch r.Method {
+ case "List":
+ files, err := fs.readdir(r.Filepath)
+ if err != nil {
+ return nil, err
+ }
+ return listerat(files), nil
+
+ case "Stat":
+ file, err := fs.fetch(r.Filepath)
+ if err != nil {
+ return nil, err
+ }
+ return listerat{file}, nil
+
+ case "Readlink":
+ symlink, err := fs.readlink(r.Filepath)
+ if err != nil {
+ return nil, err
+ }
+
+ // SFTP-v2: The server will respond with a SSH_FXP_NAME packet containing only
+ // one name and a dummy attributes value.
+ return listerat{
+ &memFile{
+ name: symlink,
+ err: os.ErrNotExist, // prevent accidental use as a reader/writer.
+ },
+ }, nil
+ }
+
+ return nil, errors.New("unsupported")
+}
+
+func (fs *root) readdir(pathname string) ([]os.FileInfo, error) {
+ dir, err := fs.fetch(pathname)
+ if err != nil {
+ return nil, err
+ }
+
+ if !dir.IsDir() {
+ return nil, syscall.ENOTDIR
+ }
+
+ var files []os.FileInfo
+
+ for name, file := range fs.files {
+ if path.Dir(name) == dir.name {
+ files = append(files, file)
+ }
+ }
+
+ sort.Slice(files, func(i, j int) bool { return files[i].Name() < files[j].Name() })
+
+ return files, nil
+}
+
+func (fs *root) readlink(pathname string) (string, error) {
+ file, err := fs.lfetch(pathname)
+ if err != nil {
+ return "", err
+ }
+
+ if file.symlink == "" {
+ return "", os.ErrInvalid
+ }
+
+ return file.symlink, nil
+}
+
+// implements LstatFileLister interface
+func (fs *root) Lstat(r *Request) (ListerAt, error) {
+ if fs.mockErr != nil {
+ return nil, fs.mockErr
+ }
+ _ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ file, err := fs.lfetch(r.Filepath)
+ if err != nil {
+ return nil, err
+ }
+ return listerat{file}, nil
+}
+
+// implements RealpathFileLister interface
+func (fs *root) Realpath(p string) string {
+ if fs.startDirectory == "" || fs.startDirectory == "/" {
+ return cleanPath(p)
+ }
+ return cleanPathWithBase(fs.startDirectory, p)
+}
+
+// In-memory file-system-y thing that the Handlers live on
+type root struct {
+ rootFile *memFile
+ mockErr error
+ startDirectory string
+
+ mu sync.Mutex
+ files map[string]*memFile
+}
+
+// Set a mocked error that the next handler call will return.
+// Set to nil to reset for no error.
+func (fs *root) returnErr(err error) {
+ fs.mockErr = err
+}
+
+func (fs *root) lfetch(path string) (*memFile, error) {
+ if path == "/" {
+ return fs.rootFile, nil
+ }
+
+ file, ok := fs.files[path]
+ if file == nil {
+ if ok {
+ delete(fs.files, path)
+ }
+
+ return nil, os.ErrNotExist
+ }
+
+ return file, nil
+}
+
+// canonName returns the “canonical” name of a file, that is:
+// if the directory of the pathname is a symlink, it follows that symlink to the valid directory name.
+// this is relatively easy, since `dir.name` will be the only valid canonical path for a directory.
+func (fs *root) canonName(pathname string) (string, error) {
+ dirname, filename := path.Dir(pathname), path.Base(pathname)
+
+ dir, err := fs.fetch(dirname)
+ if err != nil {
+ return "", err
+ }
+
+ if !dir.IsDir() {
+ return "", syscall.ENOTDIR
+ }
+
+ return path.Join(dir.name, filename), nil
+}
+
+func (fs *root) exists(path string) bool {
+ path, err := fs.canonName(path)
+ if err != nil {
+ return false
+ }
+
+ _, err = fs.lfetch(path)
+
+ return err != os.ErrNotExist
+}
+
+func (fs *root) fetch(path string) (*memFile, error) {
+ file, err := fs.lfetch(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var count int
+ for file.symlink != "" {
+ if count++; count > maxSymlinkFollows {
+ return nil, errTooManySymlinks
+ }
+
+ file, err = fs.lfetch(file.symlink)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return file, nil
+}
+
+// Implements os.FileInfo, io.ReaderAt and io.WriterAt interfaces.
+// These are the 3 interfaces necessary for the Handlers.
+// Implements the optional interface TransferError.
+type memFile struct {
+ name string
+ modtime time.Time
+ symlink string
+ isdir bool
+
+ mu sync.RWMutex
+ content []byte
+ err error
+}
+
+// These are helper functions, they must be called while holding the memFile.mu mutex
+func (f *memFile) size() int64 { return int64(len(f.content)) }
+func (f *memFile) grow(n int64) { f.content = append(f.content, make([]byte, n)...) }
+
+// Have memFile fulfill os.FileInfo interface
+func (f *memFile) Name() string { return path.Base(f.name) }
+func (f *memFile) Size() int64 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ return f.size()
+}
+func (f *memFile) Mode() os.FileMode {
+ if f.isdir {
+ return os.FileMode(0755) | os.ModeDir
+ }
+ if f.symlink != "" {
+ return os.FileMode(0777) | os.ModeSymlink
+ }
+ return os.FileMode(0644)
+}
+func (f *memFile) ModTime() time.Time { return f.modtime }
+func (f *memFile) IsDir() bool { return f.isdir }
+func (f *memFile) Sys() interface{} {
+ return fakeFileInfoSys()
+}
+
+func (f *memFile) ReadAt(b []byte, off int64) (int, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.err != nil {
+ return 0, f.err
+ }
+
+ if off < 0 {
+ return 0, errors.New("memFile.ReadAt: negative offset")
+ }
+
+ if off >= f.size() {
+ return 0, io.EOF
+ }
+
+ n := copy(b, f.content[off:])
+ if n < len(b) {
+ return n, io.EOF
+ }
+
+ return n, nil
+}
+
+func (f *memFile) WriteAt(b []byte, off int64) (int, error) {
+ // fmt.Println(string(p), off)
+ // mimic write delays, should be optional
+ time.Sleep(time.Microsecond * time.Duration(len(b)))
+
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.err != nil {
+ return 0, f.err
+ }
+
+ grow := int64(len(b)) + off - f.size()
+ if grow > 0 {
+ f.grow(grow)
+ }
+
+ return copy(f.content[off:], b), nil
+}
+
+func (f *memFile) Truncate(size int64) error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.err != nil {
+ return f.err
+ }
+
+ grow := size - f.size()
+ if grow <= 0 {
+ f.content = f.content[:size]
+ } else {
+ f.grow(grow)
+ }
+
+ return nil
+}
+
+func (f *memFile) TransferError(err error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ f.err = err
+}
diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go
new file mode 100644
index 000000000..e5dc49bb1
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-interfaces.go
@@ -0,0 +1,123 @@
+package sftp
+
+import (
+ "io"
+ "os"
+)
+
+// WriterAtReaderAt defines the interface to return when a file is to
+// be opened for reading and writing
+type WriterAtReaderAt interface {
+ io.WriterAt
+ io.ReaderAt
+}
+
+// Interfaces are differentiated based on required returned values.
+// All input arguments are to be pulled from Request (the only arg).
+
+// The Handler interfaces all take the Request object as its only argument.
+// All the data you should need to handle the call are in the Request object.
+// The request.Method attribute is initially the most important one as it
+// determines which Handler gets called.
+
+// FileReader should return an io.ReaderAt for the filepath
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: Get
+type FileReader interface {
+ Fileread(*Request) (io.ReaderAt, error)
+}
+
+// FileWriter should return an io.WriterAt for the filepath.
+//
+// The request server code will call Close() on the returned io.WriterAt
+// object if an io.Closer type assertion succeeds.
+// Note in cases of an error, the error text will be sent to the client.
+// Note when receiving an Append flag it is important to not open files using
+// O_APPEND if you plan to use WriteAt, as they conflict.
+// Called for Methods: Put, Open
+type FileWriter interface {
+ Filewrite(*Request) (io.WriterAt, error)
+}
+
+// OpenFileWriter is a FileWriter that implements the generic OpenFile method.
+// You need to implement this optional interface if you want to be able
+// to read and write from/to the same handle.
+// Called for Methods: Open
+type OpenFileWriter interface {
+ FileWriter
+ OpenFile(*Request) (WriterAtReaderAt, error)
+}
+
+// FileCmder should return an error
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove
+type FileCmder interface {
+ Filecmd(*Request) error
+}
+
+// PosixRenameFileCmder is a FileCmder that implements the PosixRename method.
+// If this interface is implemented PosixRename requests will call it
+// otherwise they will be handled in the same way as Rename
+type PosixRenameFileCmder interface {
+ FileCmder
+ PosixRename(*Request) error
+}
+
+// StatVFSFileCmder is a FileCmder that implements the StatVFS method.
+// You need to implement this interface if you want to handle statvfs requests.
+// Please also be sure that the statvfs@openssh.com extension is enabled
+type StatVFSFileCmder interface {
+ FileCmder
+ StatVFS(*Request) (*StatVFS, error)
+}
+
+// FileLister should return an object that fulfils the ListerAt interface
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: List, Stat, Readlink
+type FileLister interface {
+ Filelist(*Request) (ListerAt, error)
+}
+
+// LstatFileLister is a FileLister that implements the Lstat method.
+// If this interface is implemented Lstat requests will call it
+// otherwise they will be handled in the same way as Stat
+type LstatFileLister interface {
+ FileLister
+ Lstat(*Request) (ListerAt, error)
+}
+
+// RealPathFileLister is a FileLister that implements the Realpath method.
+// We use "/" as start directory for relative paths, implementing this
+// interface you can customize the start directory.
+// You have to return an absolute POSIX path.
+//
+// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead.
+type RealPathFileLister interface {
+ FileLister
+ RealPath(string) string
+}
+
+// NameLookupFileLister is a FileLister that implements the LookupUserName and LookupGroupName methods.
+// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames.
+type NameLookupFileLister interface {
+ FileLister
+ LookupUserName(string) string
+ LookupGroupName(string) string
+}
+
+// ListerAt does for file lists what io.ReaderAt does for files.
+// ListAt should return the number of entries copied and an io.EOF
+// error if at end of list. This is testable by comparing how many you
+// copied to how many could be copied (eg. n < len(ls) below).
+// The copy() builtin is best for the copying.
+// Note in cases of an error, the error text will be sent to the client.
+type ListerAt interface {
+ ListAt([]os.FileInfo, int64) (int, error)
+}
+
+// TransferError is an optional interface that readerAt and writerAt
+// can implement to be notified about the error causing Serve() to exit
+// with the request still open
+type TransferError interface {
+ TransferError(err error)
+}
diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go
new file mode 100644
index 000000000..2444da593
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-plan9.go
@@ -0,0 +1,34 @@
+// +build plan9
+
+package sftp
+
+import (
+ "path"
+ "path/filepath"
+ "syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+ return &syscall.Dir{}
+}
+
+func testOsSys(sys interface{}) error {
+ return nil
+}
+
+func toLocalPath(p string) string {
+ lp := filepath.FromSlash(p)
+
+ if path.IsAbs(p) {
+ tmp := lp[1:]
+
+ if filepath.IsAbs(tmp) {
+ // If the FromSlash without any starting slashes is absolute,
+ // then we have a filepath encoded with a prefix '/'.
+ // e.g. "/#s/boot" to "#s/boot"
+ return tmp
+ }
+ }
+
+ return lp
+}
diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md
new file mode 100644
index 000000000..f887274dc
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-readme.md
@@ -0,0 +1,53 @@
+# Request Based SFTP API
+
+The request based API allows for custom backends in a way similar to the http
+package. In order to create a backend you need to implement 4 handler
+interfaces; one for reading, one for writing, one for misc commands and one for
+listing files. Each has 1 required method and in each case those methods take
+the Request as the only parameter and they each return something different.
+These 4 interfaces are enough to handle all the SFTP traffic in a simplified
+manner.
+
+The Request structure has 5 public fields which you will deal with.
+
+- Method (string) - string name of incoming call
+- Filepath (string) - POSIX path of file to act on
+- Flags (uint32) - 32bit bitmask value of file open/create flags
+- Attrs ([]byte) - byte string of file attribute data
+- Target (string) - target path for renames and sym-links
+
+Below are the methods and a brief description of what they need to do.
+
+### Fileread(*Request) (io.Reader, error)
+
+Handler for "Get" method and returns an io.Reader for the file which the server
+then sends to the client.
+
+### Filewrite(*Request) (io.Writer, error)
+
+Handler for "Put" method and returns an io.Writer for the file which the server
+then writes the uploaded file to. The file opening "pflags" are currently
+preserved in the Request.Flags field as a 32bit bitmask value. See the [SFTP
+spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for
+details.
+
+### Filecmd(*Request) error
+
+Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods. Makes the
+appropriate changes and returns nil for success or a filesystem-like error
+(eg. os.ErrNotExist). The attributes are currently propagated in their raw form
+([]byte) and will need to be unmarshalled to be useful. See the respond method
+on sshFxpSetstatPacket for an example of how you might want to do this.
+
+### Fileinfo(*Request) ([]os.FileInfo, error)
+
+Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs
+with the data on the files and returns in a list (list of 1 for Stat and
+Readlink).
+
+
+## TODO
+
+- Add support for API users to see trace/debugging info of what is going on
+inside SFTP server.
+- Unmarshal the file attributes into a structure on the Request object.
diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go
new file mode 100644
index 000000000..b7dadd6c1
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-server.go
@@ -0,0 +1,328 @@
+package sftp
+
+import (
+ "context"
+ "errors"
+ "io"
+ "path"
+ "path/filepath"
+ "strconv"
+ "sync"
+)
+
+var maxTxPacket uint32 = 1 << 15
+
+// Handlers contains the 4 SFTP server request handlers.
+type Handlers struct {
+ FileGet FileReader
+ FilePut FileWriter
+ FileCmd FileCmder
+ FileList FileLister
+}
+
+// RequestServer abstracts the sftp protocol with an http request-like protocol
+type RequestServer struct {
+ Handlers Handlers
+
+ *serverConn
+ pktMgr *packetManager
+
+ startDirectory string
+
+ mu sync.RWMutex
+ handleCount int
+ openRequests map[string]*Request
+}
+
+// A RequestServerOption is a function which applies configuration to a RequestServer.
+type RequestServerOption func(*RequestServer)
+
+// WithRSAllocator enable the allocator.
+// After processing a packet we keep in memory the allocated slices
+// and we reuse them for new packets.
+// The allocator is experimental
+func WithRSAllocator() RequestServerOption {
+ return func(rs *RequestServer) {
+ alloc := newAllocator()
+ rs.pktMgr.alloc = alloc
+ rs.conn.alloc = alloc
+ }
+}
+
+// WithStartDirectory sets a start directory to use as base for relative paths.
+// If unset the default is "/"
+func WithStartDirectory(startDirectory string) RequestServerOption {
+ return func(rs *RequestServer) {
+ rs.startDirectory = cleanPath(startDirectory)
+ }
+}
+
+// NewRequestServer creates/allocates/returns new RequestServer.
+// Normally there will be one server per user-session.
+func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer {
+ svrConn := &serverConn{
+ conn: conn{
+ Reader: rwc,
+ WriteCloser: rwc,
+ },
+ }
+ rs := &RequestServer{
+ Handlers: h,
+
+ serverConn: svrConn,
+ pktMgr: newPktMgr(svrConn),
+
+ startDirectory: "/",
+
+ openRequests: make(map[string]*Request),
+ }
+
+ for _, o := range options {
+ o(rs)
+ }
+ return rs
+}
+
+// New Open packet/Request
+func (rs *RequestServer) nextRequest(r *Request) string {
+ rs.mu.Lock()
+ defer rs.mu.Unlock()
+
+ rs.handleCount++
+
+ r.handle = strconv.Itoa(rs.handleCount)
+ rs.openRequests[r.handle] = r
+
+ return r.handle
+}
+
+// Returns Request from openRequests, bool is false if it is missing.
+//
+// The Requests in openRequests work essentially as open file descriptors that
+// you can do different things with. What you are doing with it are denoted by
+// the first packet of that type (read/write/etc).
+func (rs *RequestServer) getRequest(handle string) (*Request, bool) {
+ rs.mu.RLock()
+ defer rs.mu.RUnlock()
+
+ r, ok := rs.openRequests[handle]
+ return r, ok
+}
+
+// Close the Request and clear from openRequests map
+func (rs *RequestServer) closeRequest(handle string) error {
+ rs.mu.Lock()
+ defer rs.mu.Unlock()
+
+ if r, ok := rs.openRequests[handle]; ok {
+ delete(rs.openRequests, handle)
+ return r.close()
+ }
+
+ return EBADF
+}
+
+// Close the read/write/closer to trigger exiting the main server loop
+func (rs *RequestServer) Close() error { return rs.conn.Close() }
+
+func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error {
+ defer close(pktChan) // shuts down sftpServerWorkers
+
+ var err error
+ var pkt requestPacket
+ var pktType uint8
+ var pktBytes []byte
+
+ for {
+ pktType, pktBytes, err = rs.serverConn.recvPacket(rs.pktMgr.getNextOrderID())
+ if err != nil {
+ // we don't care about releasing allocated pages here, the server will quit and the allocator freed
+ return err
+ }
+
+ pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
+ if err != nil {
+ switch {
+ case errors.Is(err, errUnknownExtendedPacket):
+ // do nothing
+ default:
+ debug("makePacket err: %v", err)
+ rs.conn.Close() // shuts down recvPacket
+ return err
+ }
+ }
+
+ pktChan <- rs.pktMgr.newOrderedRequest(pkt)
+ }
+}
+
+// Serve requests for user session
+func (rs *RequestServer) Serve() error {
+ defer func() {
+ if rs.pktMgr.alloc != nil {
+ rs.pktMgr.alloc.Free()
+ }
+ }()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ var wg sync.WaitGroup
+ runWorker := func(ch chan orderedRequest) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := rs.packetWorker(ctx, ch); err != nil {
+ rs.conn.Close() // shuts down recvPacket
+ }
+ }()
+ }
+ pktChan := rs.pktMgr.workerChan(runWorker)
+
+ err := rs.serveLoop(pktChan)
+
+ wg.Wait() // wait for all workers to exit
+
+ rs.mu.Lock()
+ defer rs.mu.Unlock()
+
+ // make sure all open requests are properly closed
+ // (eg. possible on dropped connections, client crashes, etc.)
+ for handle, req := range rs.openRequests {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ req.transferError(err)
+
+ delete(rs.openRequests, handle)
+ req.close()
+ }
+
+ return err
+}
+
+func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error {
+ for pkt := range pktChan {
+ orderID := pkt.orderID()
+ if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok {
+ if epkt.SpecificPacket != nil {
+ pkt.requestPacket = epkt.SpecificPacket
+ }
+ }
+
+ var rpkt responsePacket
+ switch pkt := pkt.requestPacket.(type) {
+ case *sshFxInitPacket:
+ rpkt = &sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions}
+ case *sshFxpClosePacket:
+ handle := pkt.getHandle()
+ rpkt = statusFromError(pkt.ID, rs.closeRequest(handle))
+ case *sshFxpRealpathPacket:
+ var realPath string
+ if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok {
+ realPath = realPather.RealPath(pkt.getPath())
+ } else {
+ realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath())
+ }
+ rpkt = cleanPacketPath(pkt, realPath)
+ case *sshFxpOpendirPacket:
+ request := requestFromPacket(ctx, pkt, rs.startDirectory)
+ handle := rs.nextRequest(request)
+ rpkt = request.opendir(rs.Handlers, pkt)
+ if _, ok := rpkt.(*sshFxpHandlePacket); !ok {
+ // if we return an error we have to remove the handle from the active ones
+ rs.closeRequest(handle)
+ }
+ case *sshFxpOpenPacket:
+ request := requestFromPacket(ctx, pkt, rs.startDirectory)
+ handle := rs.nextRequest(request)
+ rpkt = request.open(rs.Handlers, pkt)
+ if _, ok := rpkt.(*sshFxpHandlePacket); !ok {
+ // if we return an error we have to remove the handle from the active ones
+ rs.closeRequest(handle)
+ }
+ case *sshFxpFstatPacket:
+ handle := pkt.getHandle()
+ request, ok := rs.getRequest(handle)
+ if !ok {
+ rpkt = statusFromError(pkt.ID, EBADF)
+ } else {
+ request = &Request{
+ Method: "Stat",
+ Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath),
+ }
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ }
+ case *sshFxpFsetstatPacket:
+ handle := pkt.getHandle()
+ request, ok := rs.getRequest(handle)
+ if !ok {
+ rpkt = statusFromError(pkt.ID, EBADF)
+ } else {
+ request = &Request{
+ Method: "Setstat",
+ Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath),
+ }
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ }
+ case *sshFxpExtendedPacketPosixRename:
+ request := &Request{
+ Method: "PosixRename",
+ Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath),
+ Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath),
+ }
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ case *sshFxpExtendedPacketStatVFS:
+ request := &Request{
+ Method: "StatVFS",
+ Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path),
+ }
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ case hasHandle:
+ handle := pkt.getHandle()
+ request, ok := rs.getRequest(handle)
+ if !ok {
+ rpkt = statusFromError(pkt.id(), EBADF)
+ } else {
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ }
+ case hasPath:
+ request := requestFromPacket(ctx, pkt, rs.startDirectory)
+ rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
+ request.close()
+ default:
+ rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported)
+ }
+
+ rs.pktMgr.readyPacket(
+ rs.pktMgr.newOrderedResponse(rpkt, orderID))
+ }
+ return nil
+}
+
+// clean and return name packet for file
+func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath string) responsePacket {
+ return &sshFxpNamePacket{
+ ID: pkt.id(),
+ NameAttrs: []*sshFxpNameAttr{
+ {
+ Name: realPath,
+ LongName: realPath,
+ Attrs: emptyFileStat,
+ },
+ },
+ }
+}
+
+// Makes sure we have a clean POSIX (/) absolute path to work with
+func cleanPath(p string) string {
+ return cleanPathWithBase("/", p)
+}
+
+func cleanPathWithBase(base, p string) string {
+ p = filepath.ToSlash(filepath.Clean(p))
+ if !path.IsAbs(p) {
+ return path.Join(base, p)
+ }
+ return p
+}
diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go
new file mode 100644
index 000000000..50b08a38d
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-unix.go
@@ -0,0 +1,27 @@
+// +build !windows,!plan9
+
+package sftp
+
+import (
+ "errors"
+ "syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+ return &syscall.Stat_t{Uid: 65534, Gid: 65534}
+}
+
+func testOsSys(sys interface{}) error {
+ fstat := sys.(*FileStat)
+ if fstat.UID != uint32(65534) {
+ return errors.New("Uid failed to match")
+ }
+ if fstat.GID != uint32(65534) {
+ return errors.New("Gid failed to match")
+ }
+ return nil
+}
+
+func toLocalPath(p string) string {
+ return p
+}
diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go
new file mode 100644
index 000000000..116c27aab
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request.go
@@ -0,0 +1,630 @@
+package sftp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+// MaxFilelist is the max number of files to return in a readdir batch.
+var MaxFilelist int64 = 100
+
+// state encapsulates the reader/writer/readdir from handlers.
+type state struct {
+ mu sync.RWMutex
+
+ writerAt io.WriterAt
+ readerAt io.ReaderAt
+ writerAtReaderAt WriterAtReaderAt
+ listerAt ListerAt
+ lsoffset int64
+}
+
+// copy returns a shallow copy the state.
+// This is broken out to specific fields,
+// because we have to copy around the mutex in state.
+func (s *state) copy() state {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return state{
+ writerAt: s.writerAt,
+ readerAt: s.readerAt,
+ writerAtReaderAt: s.writerAtReaderAt,
+ listerAt: s.listerAt,
+ lsoffset: s.lsoffset,
+ }
+}
+
+func (s *state) setReaderAt(rd io.ReaderAt) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.readerAt = rd
+}
+
+func (s *state) getReaderAt() io.ReaderAt {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.readerAt
+}
+
+func (s *state) setWriterAt(rd io.WriterAt) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.writerAt = rd
+}
+
+func (s *state) getWriterAt() io.WriterAt {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.writerAt
+}
+
+func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.writerAtReaderAt = rw
+}
+
+func (s *state) getWriterAtReaderAt() WriterAtReaderAt {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.writerAtReaderAt
+}
+
+func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.readerAt, s.writerAt, s.writerAtReaderAt
+}
+
+// Returns current offset for file list
+func (s *state) lsNext() int64 {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.lsoffset
+}
+
+// Increases next offset
+func (s *state) lsInc(offset int64) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.lsoffset += offset
+}
+
+// manage file read/write state
+func (s *state) setListerAt(la ListerAt) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.listerAt = la
+}
+
+func (s *state) getListerAt() ListerAt {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.listerAt
+}
+
+// Request contains the data and state for the incoming service request.
+type Request struct {
+ // Get, Put, Setstat, Stat, Rename, Remove
+ // Rmdir, Mkdir, List, Readlink, Link, Symlink
+ Method string
+ Filepath string
+ Flags uint32
+ Attrs []byte // convert to sub-struct
+ Target string // for renames and sym-links
+ handle string
+
+ // reader/writer/readdir from handlers
+ state
+
+ // context lasts duration of request
+ ctx context.Context
+ cancelCtx context.CancelFunc
+}
+
+// NewRequest creates a new Request object.
+func NewRequest(method, path string) *Request {
+ return &Request{
+ Method: method,
+ Filepath: cleanPath(path),
+ }
+}
+
+// copy returns a shallow copy of existing request.
+// This is broken out to specific fields,
+// because we have to copy around the mutex in state.
+func (r *Request) copy() *Request {
+ return &Request{
+ Method: r.Method,
+ Filepath: r.Filepath,
+ Flags: r.Flags,
+ Attrs: r.Attrs,
+ Target: r.Target,
+ handle: r.handle,
+
+ state: r.state.copy(),
+
+ ctx: r.ctx,
+ cancelCtx: r.cancelCtx,
+ }
+}
+
+// New Request initialized based on packet data
+func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request {
+ request := &Request{
+ Method: requestMethod(pkt),
+ Filepath: cleanPathWithBase(baseDir, pkt.getPath()),
+ }
+ request.ctx, request.cancelCtx = context.WithCancel(ctx)
+
+ switch p := pkt.(type) {
+ case *sshFxpOpenPacket:
+ request.Flags = p.Pflags
+ case *sshFxpSetstatPacket:
+ request.Flags = p.Flags
+ request.Attrs = p.Attrs.([]byte)
+ case *sshFxpRenamePacket:
+ request.Target = cleanPathWithBase(baseDir, p.Newpath)
+ case *sshFxpSymlinkPacket:
+ // NOTE: given a POSIX compliant signature: symlink(target, linkpath string)
+ // this makes Request.Target the linkpath, and Request.Filepath the target.
+ request.Target = cleanPathWithBase(baseDir, p.Linkpath)
+ case *sshFxpExtendedPacketHardlink:
+ request.Target = cleanPathWithBase(baseDir, p.Newpath)
+ }
+ return request
+}
+
+// Context returns the request's context. To change the context,
+// use WithContext.
+//
+// The returned context is always non-nil; it defaults to the
+// background context.
+//
+// For incoming server requests, the context is canceled when the
+// request is complete or the client's connection closes.
+func (r *Request) Context() context.Context {
+ if r.ctx != nil {
+ return r.ctx
+ }
+ return context.Background()
+}
+
+// WithContext returns a copy of r with its context changed to ctx.
+// The provided ctx must be non-nil.
+func (r *Request) WithContext(ctx context.Context) *Request {
+ if ctx == nil {
+ panic("nil context")
+ }
+ r2 := r.copy()
+ r2.ctx = ctx
+ r2.cancelCtx = nil
+ return r2
+}
+
+// Close reader/writer if possible
+func (r *Request) close() error {
+ defer func() {
+ if r.cancelCtx != nil {
+ r.cancelCtx()
+ }
+ }()
+
+ rd, wr, rw := r.getAllReaderWriters()
+
+ var err error
+
+ // Close errors on a Writer are far more likely to be the important one.
+ // As they can be information that there was a loss of data.
+ if c, ok := wr.(io.Closer); ok {
+ if err2 := c.Close(); err == nil {
+ // update error if it is still nil
+ err = err2
+ }
+ }
+
+ if c, ok := rw.(io.Closer); ok {
+ if err2 := c.Close(); err == nil {
+ // update error if it is still nil
+ err = err2
+
+ r.setWriterAtReaderAt(nil)
+ }
+ }
+
+ if c, ok := rd.(io.Closer); ok {
+ if err2 := c.Close(); err == nil {
+ // update error if it is still nil
+ err = err2
+ }
+ }
+
+ return err
+}
+
+// Notify transfer error if any
+func (r *Request) transferError(err error) {
+ if err == nil {
+ return
+ }
+
+ rd, wr, rw := r.getAllReaderWriters()
+
+ if t, ok := wr.(TransferError); ok {
+ t.TransferError(err)
+ }
+
+ if t, ok := rw.(TransferError); ok {
+ t.TransferError(err)
+ }
+
+ if t, ok := rd.(TransferError); ok {
+ t.TransferError(err)
+ }
+}
+
+// called from worker to handle packet/request
+func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+ switch r.Method {
+ case "Get":
+ return fileget(handlers.FileGet, r, pkt, alloc, orderID)
+ case "Put":
+ return fileput(handlers.FilePut, r, pkt, alloc, orderID)
+ case "Open":
+ return fileputget(handlers.FilePut, r, pkt, alloc, orderID)
+ case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS":
+ return filecmd(handlers.FileCmd, r, pkt)
+ case "List":
+ return filelist(handlers.FileList, r, pkt)
+ case "Stat", "Lstat", "Readlink":
+ return filestat(handlers.FileList, r, pkt)
+ default:
+ return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method))
+ }
+}
+
+// Additional initialization for Open packets
+func (r *Request) open(h Handlers, pkt requestPacket) responsePacket {
+ flags := r.Pflags()
+
+ id := pkt.id()
+
+ switch {
+ case flags.Write, flags.Append, flags.Creat, flags.Trunc:
+ if flags.Read {
+ if openFileWriter, ok := h.FilePut.(OpenFileWriter); ok {
+ r.Method = "Open"
+ rw, err := openFileWriter.OpenFile(r)
+ if err != nil {
+ return statusFromError(id, err)
+ }
+
+ r.setWriterAtReaderAt(rw)
+
+ return &sshFxpHandlePacket{
+ ID: id,
+ Handle: r.handle,
+ }
+ }
+ }
+
+ r.Method = "Put"
+ wr, err := h.FilePut.Filewrite(r)
+ if err != nil {
+ return statusFromError(id, err)
+ }
+
+ r.setWriterAt(wr)
+
+ case flags.Read:
+ r.Method = "Get"
+ rd, err := h.FileGet.Fileread(r)
+ if err != nil {
+ return statusFromError(id, err)
+ }
+
+ r.setReaderAt(rd)
+
+ default:
+ return statusFromError(id, errors.New("bad file flags"))
+ }
+
+ return &sshFxpHandlePacket{
+ ID: id,
+ Handle: r.handle,
+ }
+}
+
+func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket {
+ r.Method = "List"
+ la, err := h.FileList.Filelist(r)
+ if err != nil {
+ return statusFromError(pkt.id(), wrapPathError(r.Filepath, err))
+ }
+
+ r.setListerAt(la)
+
+ return &sshFxpHandlePacket{
+ ID: pkt.id(),
+ Handle: r.handle,
+ }
+}
+
+// wrap FileReader handler
+func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+ rd := r.getReaderAt()
+ if rd == nil {
+ return statusFromError(pkt.id(), errors.New("unexpected read packet"))
+ }
+
+ data, offset, _ := packetData(pkt, alloc, orderID)
+
+ n, err := rd.ReadAt(data, offset)
+ // only return EOF error if no data left to read
+ if err != nil && (err != io.EOF || n == 0) {
+ return statusFromError(pkt.id(), err)
+ }
+
+ return &sshFxpDataPacket{
+ ID: pkt.id(),
+ Length: uint32(n),
+ Data: data[:n],
+ }
+}
+
+// wrap FileWriter handler
+func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+ wr := r.getWriterAt()
+ if wr == nil {
+ return statusFromError(pkt.id(), errors.New("unexpected write packet"))
+ }
+
+ data, offset, _ := packetData(pkt, alloc, orderID)
+
+ _, err := wr.WriteAt(data, offset)
+ return statusFromError(pkt.id(), err)
+}
+
+// wrap OpenFileWriter handler
+func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+ rw := r.getWriterAtReaderAt()
+ if rw == nil {
+ return statusFromError(pkt.id(), errors.New("unexpected write and read packet"))
+ }
+
+ switch p := pkt.(type) {
+ case *sshFxpReadPacket:
+ data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset)
+
+ n, err := rw.ReadAt(data, offset)
+ // only return EOF error if no data left to read
+ if err != nil && (err != io.EOF || n == 0) {
+ return statusFromError(pkt.id(), err)
+ }
+
+ return &sshFxpDataPacket{
+ ID: pkt.id(),
+ Length: uint32(n),
+ Data: data[:n],
+ }
+
+ case *sshFxpWritePacket:
+ data, offset := p.Data, int64(p.Offset)
+
+ _, err := rw.WriteAt(data, offset)
+ return statusFromError(pkt.id(), err)
+
+ default:
+ return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write"))
+ }
+}
+
+// file data for additional read/write packets
+func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) {
+ switch p := p.(type) {
+ case *sshFxpReadPacket:
+ return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len
+ case *sshFxpWritePacket:
+ return p.Data, int64(p.Offset), p.Length
+ }
+ return
+}
+
+// wrap FileCmder handler
+func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket {
+ switch p := pkt.(type) {
+ case *sshFxpFsetstatPacket:
+ r.Flags = p.Flags
+ r.Attrs = p.Attrs.([]byte)
+ }
+
+ switch r.Method {
+ case "PosixRename":
+ if posixRenamer, ok := h.(PosixRenameFileCmder); ok {
+ err := posixRenamer.PosixRename(r)
+ return statusFromError(pkt.id(), err)
+ }
+
+ // PosixRenameFileCmder not implemented handle this request as a Rename
+ r.Method = "Rename"
+ err := h.Filecmd(r)
+ return statusFromError(pkt.id(), err)
+
+ case "StatVFS":
+ if statVFSCmdr, ok := h.(StatVFSFileCmder); ok {
+ stat, err := statVFSCmdr.StatVFS(r)
+ if err != nil {
+ return statusFromError(pkt.id(), err)
+ }
+ stat.ID = pkt.id()
+ return stat
+ }
+
+ return statusFromError(pkt.id(), ErrSSHFxOpUnsupported)
+ }
+
+ err := h.Filecmd(r)
+ return statusFromError(pkt.id(), err)
+}
+
+// wrap FileLister handler
+func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket {
+ lister := r.getListerAt()
+ if lister == nil {
+ return statusFromError(pkt.id(), errors.New("unexpected dir packet"))
+ }
+
+ offset := r.lsNext()
+ finfo := make([]os.FileInfo, MaxFilelist)
+ n, err := lister.ListAt(finfo, offset)
+ r.lsInc(int64(n))
+ // ignore EOF as we only return it when there are no results
+ finfo = finfo[:n] // avoid need for nil tests below
+
+ switch r.Method {
+ case "List":
+ if err != nil && (err != io.EOF || n == 0) {
+ return statusFromError(pkt.id(), err)
+ }
+
+ nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo))
+
+ // If the type conversion fails, we get untyped `nil`,
+ // which is handled by not looking up any names.
+ idLookup, _ := h.(NameLookupFileLister)
+
+ for _, fi := range finfo {
+ nameAttrs = append(nameAttrs, &sshFxpNameAttr{
+ Name: fi.Name(),
+ LongName: runLs(idLookup, fi),
+ Attrs: []interface{}{fi},
+ })
+ }
+
+ return &sshFxpNamePacket{
+ ID: pkt.id(),
+ NameAttrs: nameAttrs,
+ }
+
+ default:
+ err = fmt.Errorf("unexpected method: %s", r.Method)
+ return statusFromError(pkt.id(), err)
+ }
+}
+
+func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket {
+ var lister ListerAt
+ var err error
+
+ if r.Method == "Lstat" {
+ if lstatFileLister, ok := h.(LstatFileLister); ok {
+ lister, err = lstatFileLister.Lstat(r)
+ } else {
+ // LstatFileLister not implemented handle this request as a Stat
+ r.Method = "Stat"
+ lister, err = h.Filelist(r)
+ }
+ } else {
+ lister, err = h.Filelist(r)
+ }
+ if err != nil {
+ return statusFromError(pkt.id(), err)
+ }
+ finfo := make([]os.FileInfo, 1)
+ n, err := lister.ListAt(finfo, 0)
+ finfo = finfo[:n] // avoid need for nil tests below
+
+ switch r.Method {
+ case "Stat", "Lstat":
+ if err != nil && err != io.EOF {
+ return statusFromError(pkt.id(), err)
+ }
+ if n == 0 {
+ err = &os.PathError{
+ Op: strings.ToLower(r.Method),
+ Path: r.Filepath,
+ Err: syscall.ENOENT,
+ }
+ return statusFromError(pkt.id(), err)
+ }
+ return &sshFxpStatResponse{
+ ID: pkt.id(),
+ info: finfo[0],
+ }
+ case "Readlink":
+ if err != nil && err != io.EOF {
+ return statusFromError(pkt.id(), err)
+ }
+ if n == 0 {
+ err = &os.PathError{
+ Op: "readlink",
+ Path: r.Filepath,
+ Err: syscall.ENOENT,
+ }
+ return statusFromError(pkt.id(), err)
+ }
+ filename := finfo[0].Name()
+ return &sshFxpNamePacket{
+ ID: pkt.id(),
+ NameAttrs: []*sshFxpNameAttr{
+ {
+ Name: filename,
+ LongName: filename,
+ Attrs: emptyFileStat,
+ },
+ },
+ }
+ default:
+ err = fmt.Errorf("unexpected method: %s", r.Method)
+ return statusFromError(pkt.id(), err)
+ }
+}
+
+// init attributes of request object from packet data
+func requestMethod(p requestPacket) (method string) {
+ switch p.(type) {
+ case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket:
+ // set in open() above
+ case *sshFxpOpendirPacket, *sshFxpReaddirPacket:
+ // set in opendir() above
+ case *sshFxpSetstatPacket, *sshFxpFsetstatPacket:
+ method = "Setstat"
+ case *sshFxpRenamePacket:
+ method = "Rename"
+ case *sshFxpSymlinkPacket:
+ method = "Symlink"
+ case *sshFxpRemovePacket:
+ method = "Remove"
+ case *sshFxpStatPacket, *sshFxpFstatPacket:
+ method = "Stat"
+ case *sshFxpLstatPacket:
+ method = "Lstat"
+ case *sshFxpRmdirPacket:
+ method = "Rmdir"
+ case *sshFxpReadlinkPacket:
+ method = "Readlink"
+ case *sshFxpMkdirPacket:
+ method = "Mkdir"
+ case *sshFxpExtendedPacketHardlink:
+ method = "Link"
+ }
+ return method
+}
diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go
new file mode 100644
index 000000000..1f6d3df17
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request_windows.go
@@ -0,0 +1,44 @@
+package sftp
+
+import (
+ "path"
+ "path/filepath"
+ "syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+ return syscall.Win32FileAttributeData{}
+}
+
+func testOsSys(sys interface{}) error {
+ return nil
+}
+
+func toLocalPath(p string) string {
+ lp := filepath.FromSlash(p)
+
+ if path.IsAbs(p) {
+ tmp := lp
+ for len(tmp) > 0 && tmp[0] == '\\' {
+ tmp = tmp[1:]
+ }
+
+ if filepath.IsAbs(tmp) {
+ // If the FromSlash without any starting slashes is absolute,
+ // then we have a filepath encoded with a prefix '/'.
+ // e.g. "/C:/Windows" to "C:\\Windows"
+ return tmp
+ }
+
+ tmp += "\\"
+
+ if filepath.IsAbs(tmp) {
+ // If the FromSlash without any starting slashes but with extra end slash is absolute,
+ // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end.
+ // e.g. "/C:" to "C:\\"
+ return tmp
+ }
+ }
+
+ return lp
+}
diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go
new file mode 100644
index 000000000..529052b44
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server.go
@@ -0,0 +1,616 @@
+package sftp
+
+// sftp server counterpart
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "syscall"
+ "time"
+)
+
+const (
+ // SftpServerWorkerCount defines the number of workers for the SFTP server
+ SftpServerWorkerCount = 8
+)
+
+// Server is an SSH File Transfer Protocol (sftp) server.
+// This is intended to provide the sftp subsystem to an ssh server daemon.
+// This implementation currently supports most of sftp server protocol version 3,
+// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+type Server struct {
+ *serverConn
+ debugStream io.Writer
+ readOnly bool
+ pktMgr *packetManager
+ openFiles map[string]*os.File
+ openFilesLock sync.RWMutex
+ handleCount int
+}
+
+func (svr *Server) nextHandle(f *os.File) string {
+ svr.openFilesLock.Lock()
+ defer svr.openFilesLock.Unlock()
+ svr.handleCount++
+ handle := strconv.Itoa(svr.handleCount)
+ svr.openFiles[handle] = f
+ return handle
+}
+
+func (svr *Server) closeHandle(handle string) error {
+ svr.openFilesLock.Lock()
+ defer svr.openFilesLock.Unlock()
+ if f, ok := svr.openFiles[handle]; ok {
+ delete(svr.openFiles, handle)
+ return f.Close()
+ }
+
+ return EBADF
+}
+
+func (svr *Server) getHandle(handle string) (*os.File, bool) {
+ svr.openFilesLock.RLock()
+ defer svr.openFilesLock.RUnlock()
+ f, ok := svr.openFiles[handle]
+ return f, ok
+}
+
+type serverRespondablePacket interface {
+ encoding.BinaryUnmarshaler
+ id() uint32
+ respond(svr *Server) responsePacket
+}
+
+// NewServer creates a new Server instance around the provided streams, serving
+// content from the root of the filesystem. Optionally, ServerOption
+// functions may be specified to further configure the Server.
+//
+// A subsequent call to Serve() is required to begin serving files over SFTP.
+func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) {
+ svrConn := &serverConn{
+ conn: conn{
+ Reader: rwc,
+ WriteCloser: rwc,
+ },
+ }
+ s := &Server{
+ serverConn: svrConn,
+ debugStream: ioutil.Discard,
+ pktMgr: newPktMgr(svrConn),
+ openFiles: make(map[string]*os.File),
+ }
+
+ for _, o := range options {
+ if err := o(s); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+// A ServerOption is a function which applies configuration to a Server.
+type ServerOption func(*Server) error
+
+// WithDebug enables Server debugging output to the supplied io.Writer.
+func WithDebug(w io.Writer) ServerOption {
+ return func(s *Server) error {
+ s.debugStream = w
+ return nil
+ }
+}
+
+// ReadOnly configures a Server to serve files in read-only mode.
+func ReadOnly() ServerOption {
+ return func(s *Server) error {
+ s.readOnly = true
+ return nil
+ }
+}
+
+// WithAllocator enable the allocator.
+// After processing a packet we keep in memory the allocated slices
+// and we reuse them for new packets.
+// The allocator is experimental
+func WithAllocator() ServerOption {
+ return func(s *Server) error {
+ alloc := newAllocator()
+ s.pktMgr.alloc = alloc
+ s.conn.alloc = alloc
+ return nil
+ }
+}
+
+type rxPacket struct {
+ pktType fxp
+ pktBytes []byte
+}
+
+// Up to N parallel servers
+func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error {
+ for pkt := range pktChan {
+ // readonly checks
+ readonly := true
+ switch pkt := pkt.requestPacket.(type) {
+ case notReadOnly:
+ readonly = false
+ case *sshFxpOpenPacket:
+ readonly = pkt.readonly()
+ case *sshFxpExtendedPacket:
+ readonly = pkt.readonly()
+ }
+
+ // If server is operating read-only and a write operation is requested,
+ // return permission denied
+ if !readonly && svr.readOnly {
+ svr.pktMgr.readyPacket(
+ svr.pktMgr.newOrderedResponse(statusFromError(pkt.id(), syscall.EPERM), pkt.orderID()),
+ )
+ continue
+ }
+
+ if err := handlePacket(svr, pkt); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func handlePacket(s *Server, p orderedRequest) error {
+ var rpkt responsePacket
+ orderID := p.orderID()
+ switch p := p.requestPacket.(type) {
+ case *sshFxInitPacket:
+ rpkt = &sshFxVersionPacket{
+ Version: sftpProtocolVersion,
+ Extensions: sftpExtensions,
+ }
+ case *sshFxpStatPacket:
+ // stat the requested file
+ info, err := os.Stat(toLocalPath(p.Path))
+ rpkt = &sshFxpStatResponse{
+ ID: p.ID,
+ info: info,
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+ case *sshFxpLstatPacket:
+ // stat the requested file
+ info, err := os.Lstat(toLocalPath(p.Path))
+ rpkt = &sshFxpStatResponse{
+ ID: p.ID,
+ info: info,
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+ case *sshFxpFstatPacket:
+ f, ok := s.getHandle(p.Handle)
+ var err error = EBADF
+ var info os.FileInfo
+ if ok {
+ info, err = f.Stat()
+ rpkt = &sshFxpStatResponse{
+ ID: p.ID,
+ info: info,
+ }
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+ case *sshFxpMkdirPacket:
+ // TODO FIXME: ignore flags field
+ err := os.Mkdir(toLocalPath(p.Path), 0755)
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpRmdirPacket:
+ err := os.Remove(toLocalPath(p.Path))
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpRemovePacket:
+ err := os.Remove(toLocalPath(p.Filename))
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpRenamePacket:
+ err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath))
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpSymlinkPacket:
+ err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath))
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpClosePacket:
+ rpkt = statusFromError(p.ID, s.closeHandle(p.Handle))
+ case *sshFxpReadlinkPacket:
+ f, err := os.Readlink(toLocalPath(p.Path))
+ rpkt = &sshFxpNamePacket{
+ ID: p.ID,
+ NameAttrs: []*sshFxpNameAttr{
+ {
+ Name: f,
+ LongName: f,
+ Attrs: emptyFileStat,
+ },
+ },
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+ case *sshFxpRealpathPacket:
+ f, err := filepath.Abs(toLocalPath(p.Path))
+ f = cleanPath(f)
+ rpkt = &sshFxpNamePacket{
+ ID: p.ID,
+ NameAttrs: []*sshFxpNameAttr{
+ {
+ Name: f,
+ LongName: f,
+ Attrs: emptyFileStat,
+ },
+ },
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+ case *sshFxpOpendirPacket:
+ p.Path = toLocalPath(p.Path)
+
+ if stat, err := os.Stat(p.Path); err != nil {
+ rpkt = statusFromError(p.ID, err)
+ } else if !stat.IsDir() {
+ rpkt = statusFromError(p.ID, &os.PathError{
+ Path: p.Path, Err: syscall.ENOTDIR})
+ } else {
+ rpkt = (&sshFxpOpenPacket{
+ ID: p.ID,
+ Path: p.Path,
+ Pflags: sshFxfRead,
+ }).respond(s)
+ }
+ case *sshFxpReadPacket:
+ var err error = EBADF
+ f, ok := s.getHandle(p.Handle)
+ if ok {
+ err = nil
+ data := p.getDataSlice(s.pktMgr.alloc, orderID)
+ n, _err := f.ReadAt(data, int64(p.Offset))
+ if _err != nil && (_err != io.EOF || n == 0) {
+ err = _err
+ }
+ rpkt = &sshFxpDataPacket{
+ ID: p.ID,
+ Length: uint32(n),
+ Data: data[:n],
+ // do not use data[:n:n] here to clamp the capacity, we allocated extra capacity above to avoid reallocations
+ }
+ }
+ if err != nil {
+ rpkt = statusFromError(p.ID, err)
+ }
+
+ case *sshFxpWritePacket:
+ f, ok := s.getHandle(p.Handle)
+ var err error = EBADF
+ if ok {
+ _, err = f.WriteAt(p.Data, int64(p.Offset))
+ }
+ rpkt = statusFromError(p.ID, err)
+ case *sshFxpExtendedPacket:
+ if p.SpecificPacket == nil {
+ rpkt = statusFromError(p.ID, ErrSSHFxOpUnsupported)
+ } else {
+ rpkt = p.respond(s)
+ }
+ case serverRespondablePacket:
+ rpkt = p.respond(s)
+ default:
+ return fmt.Errorf("unexpected packet type %T", p)
+ }
+
+ s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID))
+ return nil
+}
+
+// Serve serves SFTP connections until the streams stop or the SFTP subsystem
+// is stopped.
+func (svr *Server) Serve() error {
+ defer func() {
+ if svr.pktMgr.alloc != nil {
+ svr.pktMgr.alloc.Free()
+ }
+ }()
+ var wg sync.WaitGroup
+ runWorker := func(ch chan orderedRequest) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := svr.sftpServerWorker(ch); err != nil {
+ svr.conn.Close() // shuts down recvPacket
+ }
+ }()
+ }
+ pktChan := svr.pktMgr.workerChan(runWorker)
+
+ var err error
+ var pkt requestPacket
+ var pktType uint8
+ var pktBytes []byte
+ for {
+ pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID())
+ if err != nil {
+ // we don't care about releasing allocated pages here, the server will quit and the allocator freed
+ break
+ }
+
+ pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
+ if err != nil {
+ switch {
+ case errors.Is(err, errUnknownExtendedPacket):
+ //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil {
+ // debug("failed to send err packet: %v", err)
+ // svr.conn.Close() // shuts down recvPacket
+ // break
+ //}
+ default:
+ debug("makePacket err: %v", err)
+ svr.conn.Close() // shuts down recvPacket
+ break
+ }
+ }
+
+ pktChan <- svr.pktMgr.newOrderedRequest(pkt)
+ }
+
+ close(pktChan) // shuts down sftpServerWorkers
+ wg.Wait() // wait for all workers to exit
+
+ // close any still-open files
+ for handle, file := range svr.openFiles {
+ fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
+ file.Close()
+ }
+ return err // error from recvPacket
+}
+
+type ider interface {
+ id() uint32
+}
+
+// The init packet has no ID, so we just return a zero-value ID
+func (p *sshFxInitPacket) id() uint32 { return 0 }
+
+type sshFxpStatResponse struct {
+ ID uint32
+ info os.FileInfo
+}
+
+func (p *sshFxpStatResponse) marshalPacket() ([]byte, []byte, error) {
+ l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(id)
+
+ b := make([]byte, 4, l)
+ b = append(b, sshFxpAttrs)
+ b = marshalUint32(b, p.ID)
+
+ var payload []byte
+ payload = marshalFileInfo(payload, p.info)
+
+ return b, payload, nil
+}
+
+func (p *sshFxpStatResponse) MarshalBinary() ([]byte, error) {
+ header, payload, err := p.marshalPacket()
+ return append(header, payload...), err
+}
+
+var emptyFileStat = []interface{}{uint32(0)}
+
+func (p *sshFxpOpenPacket) readonly() bool {
+ return !p.hasPflags(sshFxfWrite)
+}
+
+func (p *sshFxpOpenPacket) hasPflags(flags ...uint32) bool {
+ for _, f := range flags {
+ if p.Pflags&f == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket {
+ var osFlags int
+ if p.hasPflags(sshFxfRead, sshFxfWrite) {
+ osFlags |= os.O_RDWR
+ } else if p.hasPflags(sshFxfWrite) {
+ osFlags |= os.O_WRONLY
+ } else if p.hasPflags(sshFxfRead) {
+ osFlags |= os.O_RDONLY
+ } else {
+ // how are they opening?
+ return statusFromError(p.ID, syscall.EINVAL)
+ }
+
+ // Don't use O_APPEND flag as it conflicts with WriteAt.
+ // The sshFxfAppend flag is a no-op here as the client sends the offsets.
+
+ if p.hasPflags(sshFxfCreat) {
+ osFlags |= os.O_CREATE
+ }
+ if p.hasPflags(sshFxfTrunc) {
+ osFlags |= os.O_TRUNC
+ }
+ if p.hasPflags(sshFxfExcl) {
+ osFlags |= os.O_EXCL
+ }
+
+ f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644)
+ if err != nil {
+ return statusFromError(p.ID, err)
+ }
+
+ handle := svr.nextHandle(f)
+ return &sshFxpHandlePacket{ID: p.ID, Handle: handle}
+}
+
+func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket {
+ f, ok := svr.getHandle(p.Handle)
+ if !ok {
+ return statusFromError(p.ID, EBADF)
+ }
+
+ dirents, err := f.Readdir(128)
+ if err != nil {
+ return statusFromError(p.ID, err)
+ }
+
+ idLookup := osIDLookup{}
+
+ ret := &sshFxpNamePacket{ID: p.ID}
+ for _, dirent := range dirents {
+ ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{
+ Name: dirent.Name(),
+ LongName: runLs(idLookup, dirent),
+ Attrs: []interface{}{dirent},
+ })
+ }
+ return ret
+}
+
+func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket {
+ // additional unmarshalling is required for each possibility here
+ b := p.Attrs.([]byte)
+ var err error
+
+ p.Path = toLocalPath(p.Path)
+
+ debug("setstat name \"%s\"", p.Path)
+ if (p.Flags & sshFileXferAttrSize) != 0 {
+ var size uint64
+ if size, b, err = unmarshalUint64Safe(b); err == nil {
+ err = os.Truncate(p.Path, int64(size))
+ }
+ }
+ if (p.Flags & sshFileXferAttrPermissions) != 0 {
+ var mode uint32
+ if mode, b, err = unmarshalUint32Safe(b); err == nil {
+ err = os.Chmod(p.Path, os.FileMode(mode))
+ }
+ }
+ if (p.Flags & sshFileXferAttrACmodTime) != 0 {
+ var atime uint32
+ var mtime uint32
+ if atime, b, err = unmarshalUint32Safe(b); err != nil {
+ } else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
+ } else {
+ atimeT := time.Unix(int64(atime), 0)
+ mtimeT := time.Unix(int64(mtime), 0)
+ err = os.Chtimes(p.Path, atimeT, mtimeT)
+ }
+ }
+ if (p.Flags & sshFileXferAttrUIDGID) != 0 {
+ var uid uint32
+ var gid uint32
+ if uid, b, err = unmarshalUint32Safe(b); err != nil {
+ } else if gid, _, err = unmarshalUint32Safe(b); err != nil {
+ } else {
+ err = os.Chown(p.Path, int(uid), int(gid))
+ }
+ }
+
+ return statusFromError(p.ID, err)
+}
+
+func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket {
+ f, ok := svr.getHandle(p.Handle)
+ if !ok {
+ return statusFromError(p.ID, EBADF)
+ }
+
+ // additional unmarshalling is required for each possibility here
+ b := p.Attrs.([]byte)
+ var err error
+
+ debug("fsetstat name \"%s\"", f.Name())
+ if (p.Flags & sshFileXferAttrSize) != 0 {
+ var size uint64
+ if size, b, err = unmarshalUint64Safe(b); err == nil {
+ err = f.Truncate(int64(size))
+ }
+ }
+ if (p.Flags & sshFileXferAttrPermissions) != 0 {
+ var mode uint32
+ if mode, b, err = unmarshalUint32Safe(b); err == nil {
+ err = f.Chmod(os.FileMode(mode))
+ }
+ }
+ if (p.Flags & sshFileXferAttrACmodTime) != 0 {
+ var atime uint32
+ var mtime uint32
+ if atime, b, err = unmarshalUint32Safe(b); err != nil {
+ } else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
+ } else {
+ atimeT := time.Unix(int64(atime), 0)
+ mtimeT := time.Unix(int64(mtime), 0)
+ err = os.Chtimes(f.Name(), atimeT, mtimeT)
+ }
+ }
+ if (p.Flags & sshFileXferAttrUIDGID) != 0 {
+ var uid uint32
+ var gid uint32
+ if uid, b, err = unmarshalUint32Safe(b); err != nil {
+ } else if gid, _, err = unmarshalUint32Safe(b); err != nil {
+ } else {
+ err = f.Chown(int(uid), int(gid))
+ }
+ }
+
+ return statusFromError(p.ID, err)
+}
+
+func statusFromError(id uint32, err error) *sshFxpStatusPacket {
+ ret := &sshFxpStatusPacket{
+ ID: id,
+ StatusError: StatusError{
+ // sshFXOk = 0
+ // sshFXEOF = 1
+ // sshFXNoSuchFile = 2 ENOENT
+ // sshFXPermissionDenied = 3
+ // sshFXFailure = 4
+ // sshFXBadMessage = 5
+ // sshFXNoConnection = 6
+ // sshFXConnectionLost = 7
+ // sshFXOPUnsupported = 8
+ Code: sshFxOk,
+ },
+ }
+ if err == nil {
+ return ret
+ }
+
+ debug("statusFromError: error is %T %#v", err, err)
+ ret.StatusError.Code = sshFxFailure
+ ret.StatusError.msg = err.Error()
+
+ if os.IsNotExist(err) {
+ ret.StatusError.Code = sshFxNoSuchFile
+ return ret
+ }
+ if code, ok := translateSyscallError(err); ok {
+ ret.StatusError.Code = code
+ return ret
+ }
+
+ switch e := err.(type) {
+ case fxerr:
+ ret.StatusError.Code = uint32(e)
+ default:
+ if e == io.EOF {
+ ret.StatusError.Code = sshFxEOF
+ }
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
new file mode 100644
index 000000000..8c01dac52
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
@@ -0,0 +1,21 @@
+package sftp
+
+import (
+ "syscall"
+)
+
+func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
+ return &StatVFS{
+ Bsize: uint64(stat.Bsize),
+ Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here
+ Blocks: stat.Blocks,
+ Bfree: stat.Bfree,
+ Bavail: stat.Bavail,
+ Files: stat.Files,
+ Ffree: stat.Ffree,
+ Favail: stat.Ffree, // not sure how to calculate Favail
+ Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness?
+ Flag: uint64(stat.Flags), // assuming POSIX?
+ Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024
+ }, nil
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go
new file mode 100644
index 000000000..94b6d832c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go
@@ -0,0 +1,29 @@
+// +build darwin linux
+
+// fill in statvfs structure with OS specific values
+// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance)
+
+package sftp
+
+import (
+ "syscall"
+)
+
+func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
+ retPkt, err := getStatVFSForPath(p.Path)
+ if err != nil {
+ return statusFromError(p.ID, err)
+ }
+ retPkt.ID = p.ID
+
+ return retPkt
+}
+
+func getStatVFSForPath(name string) (*StatVFS, error) {
+ var stat syscall.Statfs_t
+ if err := syscall.Statfs(name, &stat); err != nil {
+ return nil, err
+ }
+
+ return statvfsFromStatfst(&stat)
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go
new file mode 100644
index 000000000..1d180d47c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go
@@ -0,0 +1,22 @@
+// +build linux
+
+package sftp
+
+import (
+ "syscall"
+)
+
+func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
+ return &StatVFS{
+ Bsize: uint64(stat.Bsize),
+ Frsize: uint64(stat.Frsize),
+ Blocks: stat.Blocks,
+ Bfree: stat.Bfree,
+ Bavail: stat.Bavail,
+ Files: stat.Files,
+ Ffree: stat.Ffree,
+ Favail: stat.Ffree, // not sure how to calculate Favail
+ Flag: uint64(stat.Flags), // assuming POSIX?
+ Namemax: uint64(stat.Namelen),
+ }, nil
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_plan9.go b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go
new file mode 100644
index 000000000..e71a27d37
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go
@@ -0,0 +1,13 @@
+package sftp
+
+import (
+ "syscall"
+)
+
+func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
+ return statusFromError(p.ID, syscall.EPLAN9)
+}
+
+func getStatVFSForPath(name string) (*StatVFS, error) {
+ return nil, syscall.EPLAN9
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go
new file mode 100644
index 000000000..fbf49068f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go
@@ -0,0 +1,15 @@
+// +build !darwin,!linux,!plan9
+
+package sftp
+
+import (
+ "syscall"
+)
+
+func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
+ return statusFromError(p.ID, syscall.ENOTSUP)
+}
+
+func getStatVFSForPath(name string) (*StatVFS, error) {
+ return nil, syscall.ENOTSUP
+}
diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go
new file mode 100644
index 000000000..9a63c39dc
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/sftp.go
@@ -0,0 +1,258 @@
+// Package sftp implements the SSH File Transfer Protocol as described in
+// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+package sftp
+
+import (
+ "fmt"
+)
+
+const (
+ sshFxpInit = 1
+ sshFxpVersion = 2
+ sshFxpOpen = 3
+ sshFxpClose = 4
+ sshFxpRead = 5
+ sshFxpWrite = 6
+ sshFxpLstat = 7
+ sshFxpFstat = 8
+ sshFxpSetstat = 9
+ sshFxpFsetstat = 10
+ sshFxpOpendir = 11
+ sshFxpReaddir = 12
+ sshFxpRemove = 13
+ sshFxpMkdir = 14
+ sshFxpRmdir = 15
+ sshFxpRealpath = 16
+ sshFxpStat = 17
+ sshFxpRename = 18
+ sshFxpReadlink = 19
+ sshFxpSymlink = 20
+ sshFxpStatus = 101
+ sshFxpHandle = 102
+ sshFxpData = 103
+ sshFxpName = 104
+ sshFxpAttrs = 105
+ sshFxpExtended = 200
+ sshFxpExtendedReply = 201
+)
+
+const (
+ sshFxOk = 0
+ sshFxEOF = 1
+ sshFxNoSuchFile = 2
+ sshFxPermissionDenied = 3
+ sshFxFailure = 4
+ sshFxBadMessage = 5
+ sshFxNoConnection = 6
+ sshFxConnectionLost = 7
+ sshFxOPUnsupported = 8
+
+ // see draft-ietf-secsh-filexfer-13
+ // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1
+ sshFxInvalidHandle = 9
+ sshFxNoSuchPath = 10
+ sshFxFileAlreadyExists = 11
+ sshFxWriteProtect = 12
+ sshFxNoMedia = 13
+ sshFxNoSpaceOnFilesystem = 14
+ sshFxQuotaExceeded = 15
+ sshFxUnknownPrincipal = 16
+ sshFxLockConflict = 17
+ sshFxDirNotEmpty = 18
+ sshFxNotADirectory = 19
+ sshFxInvalidFilename = 20
+ sshFxLinkLoop = 21
+ sshFxCannotDelete = 22
+ sshFxInvalidParameter = 23
+ sshFxFileIsADirectory = 24
+ sshFxByteRangeLockConflict = 25
+ sshFxByteRangeLockRefused = 26
+ sshFxDeletePending = 27
+ sshFxFileCorrupt = 28
+ sshFxOwnerInvalid = 29
+ sshFxGroupInvalid = 30
+ sshFxNoMatchingByteRangeLock = 31
+)
+
+const (
+ sshFxfRead = 0x00000001
+ sshFxfWrite = 0x00000002
+ sshFxfAppend = 0x00000004
+ sshFxfCreat = 0x00000008
+ sshFxfTrunc = 0x00000010
+ sshFxfExcl = 0x00000020
+)
+
+var (
+ // supportedSFTPExtensions defines the supported extensions
+ supportedSFTPExtensions = []sshExtensionPair{
+ {"hardlink@openssh.com", "1"},
+ {"posix-rename@openssh.com", "1"},
+ {"statvfs@openssh.com", "2"},
+ }
+ sftpExtensions = supportedSFTPExtensions
+)
+
+type fxp uint8
+
+func (f fxp) String() string {
+ switch f {
+ case sshFxpInit:
+ return "SSH_FXP_INIT"
+ case sshFxpVersion:
+ return "SSH_FXP_VERSION"
+ case sshFxpOpen:
+ return "SSH_FXP_OPEN"
+ case sshFxpClose:
+ return "SSH_FXP_CLOSE"
+ case sshFxpRead:
+ return "SSH_FXP_READ"
+ case sshFxpWrite:
+ return "SSH_FXP_WRITE"
+ case sshFxpLstat:
+ return "SSH_FXP_LSTAT"
+ case sshFxpFstat:
+ return "SSH_FXP_FSTAT"
+ case sshFxpSetstat:
+ return "SSH_FXP_SETSTAT"
+ case sshFxpFsetstat:
+ return "SSH_FXP_FSETSTAT"
+ case sshFxpOpendir:
+ return "SSH_FXP_OPENDIR"
+ case sshFxpReaddir:
+ return "SSH_FXP_READDIR"
+ case sshFxpRemove:
+ return "SSH_FXP_REMOVE"
+ case sshFxpMkdir:
+ return "SSH_FXP_MKDIR"
+ case sshFxpRmdir:
+ return "SSH_FXP_RMDIR"
+ case sshFxpRealpath:
+ return "SSH_FXP_REALPATH"
+ case sshFxpStat:
+ return "SSH_FXP_STAT"
+ case sshFxpRename:
+ return "SSH_FXP_RENAME"
+ case sshFxpReadlink:
+ return "SSH_FXP_READLINK"
+ case sshFxpSymlink:
+ return "SSH_FXP_SYMLINK"
+ case sshFxpStatus:
+ return "SSH_FXP_STATUS"
+ case sshFxpHandle:
+ return "SSH_FXP_HANDLE"
+ case sshFxpData:
+ return "SSH_FXP_DATA"
+ case sshFxpName:
+ return "SSH_FXP_NAME"
+ case sshFxpAttrs:
+ return "SSH_FXP_ATTRS"
+ case sshFxpExtended:
+ return "SSH_FXP_EXTENDED"
+ case sshFxpExtendedReply:
+ return "SSH_FXP_EXTENDED_REPLY"
+ default:
+ return "unknown"
+ }
+}
+
+type fx uint8
+
+func (f fx) String() string {
+ switch f {
+ case sshFxOk:
+ return "SSH_FX_OK"
+ case sshFxEOF:
+ return "SSH_FX_EOF"
+ case sshFxNoSuchFile:
+ return "SSH_FX_NO_SUCH_FILE"
+ case sshFxPermissionDenied:
+ return "SSH_FX_PERMISSION_DENIED"
+ case sshFxFailure:
+ return "SSH_FX_FAILURE"
+ case sshFxBadMessage:
+ return "SSH_FX_BAD_MESSAGE"
+ case sshFxNoConnection:
+ return "SSH_FX_NO_CONNECTION"
+ case sshFxConnectionLost:
+ return "SSH_FX_CONNECTION_LOST"
+ case sshFxOPUnsupported:
+ return "SSH_FX_OP_UNSUPPORTED"
+ default:
+ return "unknown"
+ }
+}
+
+type unexpectedPacketErr struct {
+ want, got uint8
+}
+
+func (u *unexpectedPacketErr) Error() string {
+ return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got))
+}
+
+func unimplementedPacketErr(u uint8) error {
+ return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u))
+}
+
+type unexpectedIDErr struct{ want, got uint32 }
+
+func (u *unexpectedIDErr) Error() string {
+ return fmt.Sprintf("sftp: unexpected id: want %d, got %d", u.want, u.got)
+}
+
+func unimplementedSeekWhence(whence int) error {
+ return fmt.Errorf("sftp: unimplemented seek whence %d", whence)
+}
+
+func unexpectedCount(want, got uint32) error {
+ return fmt.Errorf("sftp: unexpected count: want %d, got %d", want, got)
+}
+
+type unexpectedVersionErr struct{ want, got uint32 }
+
+func (u *unexpectedVersionErr) Error() string {
+ return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got)
+}
+
+// A StatusError is returned when an SFTP operation fails, and provides
+// additional information about the failure.
+type StatusError struct {
+ Code uint32
+ msg, lang string
+}
+
+func (s *StatusError) Error() string {
+ return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code))
+}
+
+// FxCode returns the error code typed to match against the exported codes
+func (s *StatusError) FxCode() fxerr {
+ return fxerr(s.Code)
+}
+
+func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) {
+ for _, supportedExtension := range supportedSFTPExtensions {
+ if supportedExtension.Name == extensionName {
+ return supportedExtension, nil
+ }
+ }
+ return sshExtensionPair{}, fmt.Errorf("unsupported extension: %s", extensionName)
+}
+
+// SetSFTPExtensions allows to customize the supported server extensions.
+// See the variable supportedSFTPExtensions for supported extensions.
+// This method accepts a slice of extension names, for example "hardlink@openssh.com".
+// If an invalid extension is given, an error will be returned and nothing will be changed.
+func SetSFTPExtensions(extensions ...string) error {
+ tempExtensions := []sshExtensionPair{}
+ for _, extension := range extensions {
+ sftpExtension, err := getSupportedExtensionByName(extension)
+ if err != nil {
+ return err
+ }
+ tempExtensions = append(tempExtensions, sftpExtension)
+ }
+ sftpExtensions = tempExtensions
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/stat_plan9.go b/vendor/github.com/pkg/sftp/stat_plan9.go
new file mode 100644
index 000000000..761abdf56
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/stat_plan9.go
@@ -0,0 +1,103 @@
+package sftp
+
+import (
+ "os"
+ "syscall"
+)
+
+var EBADF = syscall.NewError("fd out of range or not open")
+
+func wrapPathError(filepath string, err error) error {
+ if errno, ok := err.(syscall.ErrorString); ok {
+ return &os.PathError{Path: filepath, Err: errno}
+ }
+ return err
+}
+
+// translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.ErrorString) uint32 {
+ switch errno {
+ case "":
+ return sshFxOk
+ case syscall.ENOENT:
+ return sshFxNoSuchFile
+ case syscall.EPERM:
+ return sshFxPermissionDenied
+ }
+
+ return sshFxFailure
+}
+
+func translateSyscallError(err error) (uint32, bool) {
+ switch e := err.(type) {
+ case syscall.ErrorString:
+ return translateErrno(e), true
+ case *os.PathError:
+ debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err)
+ if errno, ok := e.Err.(syscall.ErrorString); ok {
+ return translateErrno(errno), true
+ }
+ }
+ return 0, false
+}
+
+// isRegular returns true if the mode describes a regular file.
+func isRegular(mode uint32) bool {
+ return mode&S_IFMT == syscall.S_IFREG
+}
+
+// toFileMode converts sftp filemode bits to the os.FileMode specification
+func toFileMode(mode uint32) os.FileMode {
+ var fm = os.FileMode(mode & 0777)
+
+ switch mode & S_IFMT {
+ case syscall.S_IFBLK:
+ fm |= os.ModeDevice
+ case syscall.S_IFCHR:
+ fm |= os.ModeDevice | os.ModeCharDevice
+ case syscall.S_IFDIR:
+ fm |= os.ModeDir
+ case syscall.S_IFIFO:
+ fm |= os.ModeNamedPipe
+ case syscall.S_IFLNK:
+ fm |= os.ModeSymlink
+ case syscall.S_IFREG:
+ // nothing to do
+ case syscall.S_IFSOCK:
+ fm |= os.ModeSocket
+ }
+
+ return fm
+}
+
+// fromFileMode converts from the os.FileMode specification to sftp filemode bits
+func fromFileMode(mode os.FileMode) uint32 {
+ ret := uint32(mode & os.ModePerm)
+
+ switch mode & os.ModeType {
+ case os.ModeDevice | os.ModeCharDevice:
+ ret |= syscall.S_IFCHR
+ case os.ModeDevice:
+ ret |= syscall.S_IFBLK
+ case os.ModeDir:
+ ret |= syscall.S_IFDIR
+ case os.ModeNamedPipe:
+ ret |= syscall.S_IFIFO
+ case os.ModeSymlink:
+ ret |= syscall.S_IFLNK
+ case 0:
+ ret |= syscall.S_IFREG
+ case os.ModeSocket:
+ ret |= syscall.S_IFSOCK
+ }
+
+ return ret
+}
+
+// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should
+// be able to send these bits to a POSIX server.
+const (
+ s_ISUID = 04000
+ s_ISGID = 02000
+ s_ISVTX = 01000
+)
diff --git a/vendor/github.com/pkg/sftp/stat_posix.go b/vendor/github.com/pkg/sftp/stat_posix.go
new file mode 100644
index 000000000..5b870e23c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/stat_posix.go
@@ -0,0 +1,124 @@
+//go:build !plan9
+// +build !plan9
+
+package sftp
+
+import (
+ "os"
+ "syscall"
+)
+
+const EBADF = syscall.EBADF
+
+func wrapPathError(filepath string, err error) error {
+ if errno, ok := err.(syscall.Errno); ok {
+ return &os.PathError{Path: filepath, Err: errno}
+ }
+ return err
+}
+
+// translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.Errno) uint32 {
+ switch errno {
+ case 0:
+ return sshFxOk
+ case syscall.ENOENT:
+ return sshFxNoSuchFile
+ case syscall.EACCES, syscall.EPERM:
+ return sshFxPermissionDenied
+ }
+
+ return sshFxFailure
+}
+
+func translateSyscallError(err error) (uint32, bool) {
+ switch e := err.(type) {
+ case syscall.Errno:
+ return translateErrno(e), true
+ case *os.PathError:
+ debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err)
+ if errno, ok := e.Err.(syscall.Errno); ok {
+ return translateErrno(errno), true
+ }
+ }
+ return 0, false
+}
+
+// isRegular returns true if the mode describes a regular file.
+func isRegular(mode uint32) bool {
+ return mode&S_IFMT == syscall.S_IFREG
+}
+
+// toFileMode converts sftp filemode bits to the os.FileMode specification
+func toFileMode(mode uint32) os.FileMode {
+ var fm = os.FileMode(mode & 0777)
+
+ switch mode & S_IFMT {
+ case syscall.S_IFBLK:
+ fm |= os.ModeDevice
+ case syscall.S_IFCHR:
+ fm |= os.ModeDevice | os.ModeCharDevice
+ case syscall.S_IFDIR:
+ fm |= os.ModeDir
+ case syscall.S_IFIFO:
+ fm |= os.ModeNamedPipe
+ case syscall.S_IFLNK:
+ fm |= os.ModeSymlink
+ case syscall.S_IFREG:
+ // nothing to do
+ case syscall.S_IFSOCK:
+ fm |= os.ModeSocket
+ }
+
+ if mode&syscall.S_ISUID != 0 {
+ fm |= os.ModeSetuid
+ }
+ if mode&syscall.S_ISGID != 0 {
+ fm |= os.ModeSetgid
+ }
+ if mode&syscall.S_ISVTX != 0 {
+ fm |= os.ModeSticky
+ }
+
+ return fm
+}
+
+// fromFileMode converts from the os.FileMode specification to sftp filemode bits
+func fromFileMode(mode os.FileMode) uint32 {
+ ret := uint32(mode & os.ModePerm)
+
+ switch mode & os.ModeType {
+ case os.ModeDevice | os.ModeCharDevice:
+ ret |= syscall.S_IFCHR
+ case os.ModeDevice:
+ ret |= syscall.S_IFBLK
+ case os.ModeDir:
+ ret |= syscall.S_IFDIR
+ case os.ModeNamedPipe:
+ ret |= syscall.S_IFIFO
+ case os.ModeSymlink:
+ ret |= syscall.S_IFLNK
+ case 0:
+ ret |= syscall.S_IFREG
+ case os.ModeSocket:
+ ret |= syscall.S_IFSOCK
+ }
+
+ if mode&os.ModeSetuid != 0 {
+ ret |= syscall.S_ISUID
+ }
+ if mode&os.ModeSetgid != 0 {
+ ret |= syscall.S_ISGID
+ }
+ if mode&os.ModeSticky != 0 {
+ ret |= syscall.S_ISVTX
+ }
+
+ return ret
+}
+
+const (
+ s_ISUID = syscall.S_ISUID
+ s_ISGID = syscall.S_ISGID
+ s_ISVTX = syscall.S_ISVTX
+)
diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go
new file mode 100644
index 000000000..d40457776
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/syscall_fixed.go
@@ -0,0 +1,9 @@
+// +build plan9 windows js,wasm
+
+// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of
+// 0xf000. None of the other S_IFxyz values include the "1" (in 0x1f000)
+// which prevents them from matching the bitmask.
+
+package sftp
+
+const S_IFMT = 0xf000
diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go
new file mode 100644
index 000000000..4c2b240cf
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/syscall_good.go
@@ -0,0 +1,8 @@
+// +build !plan9,!windows
+// +build !js !wasm
+
+package sftp
+
+import "syscall"
+
+const S_IFMT = syscall.S_IFMT
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index cda3fdd35..bc62161d6 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
- "fmt"
+ "errors"
+ "strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
@@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) {
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
- return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
+ return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
- return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
+ return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
@@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
- return nil, fmt.Errorf("bad input point: low order point")
+ return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index dbc79d583..3c4d18a15 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -772,7 +772,7 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
}
func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
- if algorithm == "" || algorithm == s.pub.Type() {
+ if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) {
return s.Sign(rand, data)
}
@@ -791,6 +791,33 @@ func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algo
var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
+// certKeyAlgoNames is a mapping from known certificate algorithm names to the
+// corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in certs.go.
+var certKeyAlgoNames = map[string]string{
+ ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA,
+ ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256,
+ ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512,
+ ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA,
+ ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256,
+ ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384,
+ ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521,
+ ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256,
+ ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519,
+ ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519,
+}
+
+// underlyingAlgo returns the signature algorithm associated with algo (which is
+// an advertised or negotiated public key or host key algorithm). These are
+// usually the same, except for certificate algorithms.
+func underlyingAlgo(algo string) string {
+ if a, ok := certKeyAlgoNames[algo]; ok {
+ return a
+ }
+ return algo
+}
+
// Calls an extension method. It is up to the agent implementation as to whether or not
// any particular extension is supported and may always return an error. Because the
// type of the response is up to the implementation, this returns the bytes of the
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index a69e22491..4600c2077 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -460,6 +460,8 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in agent/client.go.
var certKeyAlgoNames = map[string]string{
CertAlgoRSAv01: KeyAlgoRSA,
CertAlgoRSASHA256v01: KeyAlgoRSASHA256,
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0013ece48..e4d1330fe 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -114,7 +114,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.49.2-0.20220804143628-dc97077782d5
+# github.com/containers/common v0.49.2-0.20220809074359-b0ea008ba661
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@@ -157,6 +157,7 @@ github.com/containers/common/pkg/secrets/filedriver
github.com/containers/common/pkg/secrets/passdriver
github.com/containers/common/pkg/secrets/shelldriver
github.com/containers/common/pkg/signal
+github.com/containers/common/pkg/ssh
github.com/containers/common/pkg/subscriptions
github.com/containers/common/pkg/supplemented
github.com/containers/common/pkg/sysinfo
@@ -403,9 +404,6 @@ github.com/docker/go-units
# github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
## explicit
github.com/docker/libnetwork/ipamutils
-# github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4
-## explicit
-github.com/dtylman/scp
# github.com/felixge/httpsnoop v1.0.1
github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.5.4
@@ -488,6 +486,8 @@ github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/pgzip v1.2.5
github.com/klauspost/pgzip
+# github.com/kr/fs v0.1.0
+github.com/kr/fs
# github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e
github.com/letsencrypt/boulder/core
github.com/letsencrypt/boulder/core/proto
@@ -630,6 +630,9 @@ github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.9.1
github.com/pkg/errors
+# github.com/pkg/sftp v1.13.5
+github.com/pkg/sftp
+github.com/pkg/sftp/internal/encoding/ssh/filexfer
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
# github.com/proglottis/gpgme v0.1.3
@@ -745,8 +748,7 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
-## explicit
+# golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20