author     baude <bbaude@redhat.com>  2018-04-25 13:26:52 -0500
committer  Atomic Bot <atomic-devel@projectatomic.io>  2018-04-27 20:51:07 +0000
commit     a824186ac9803ef5f7548df790988a4ebd2d9c07 (patch)
tree       63c64e9be4d9c44bd160dd974b740231497eabcd /vendor/github.com/docker
parent     4e468ce83d69e9748e80eb98a6f5bd3c5114cc7d (diff)
Use buildah commit and bud in podman
Vendor in buildah and use as much of commit and bud as possible for podman build and commit.

Resolves #586

Signed-off-by: baude <bbaude@redhat.com>

Closes: #681
Approved by: mheon
Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 56
-rw-r--r--  vendor/github.com/docker/docker/api/names.go | 9
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 2
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/host_config.go | 39
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/parse.go | 272
-rw-r--r--  vendor/github.com/docker/docker/api/types/mount/mount.go | 4
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/timestamp.go | 4
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/command/command.go | 46
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go | 399
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go | 363
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go | 118
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_list.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/client.go | 86
-rw-r--r--  vendor/github.com/docker/docker/client/config_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/config_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/config_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_inspect.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/container_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 245
-rw-r--r--  vendor/github.com/docker/docker/client/hijack.go | 5
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/image_remove.go | 4
-rw-r--r--  vendor/github.com/docker/docker/client/image_search.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/network_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/network_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/node_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/node_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/node_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/parse_logs.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 8
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/secret_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/secret_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/secret_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/service_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/service_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/service_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/task_inspect.go | 7
-rw-r--r--  vendor/github.com/docker/docker/client/task_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/tlsconfig_clone.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go (renamed from vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go) | 6
-rw-r--r--  vendor/github.com/docker/docker/client/transport.go | 8
-rw-r--r--  vendor/github.com/docker/docker/client/utils.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/volume_inspect.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/volume_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/hack/README.md | 60
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md | 69
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf | 2
-rw-r--r--  vendor/github.com/docker/docker/opts/env.go | 48
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts.go | 165
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_unix.go | 8
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/opts/ip.go | 47
-rw-r--r--  vendor/github.com/docker/docker/opts/opts.go | 327
-rw-r--r--  vendor/github.com/docker/docker/opts/opts_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/opts/opts_windows.go | 56
-rw-r--r--  vendor/github.com/docker/docker/opts/quotedstring.go | 37
-rw-r--r--  vendor/github.com/docker/docker/opts/runtime.go | 79
-rw-r--r--  vendor/github.com/docker/docker/opts/ulimit.go | 81
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go | 1237
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 92
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_other.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 122
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_windows.go | 79
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes.go | 441
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_linux.go | 313
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_other.go | 97
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_unix.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_windows.go | 30
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy.go | 472
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_unix.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff.go | 256
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_linux.go | 16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_unsupported.go | 16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap.go | 59
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils.go | 298
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go | 27
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go | 317
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go | 3
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/pools/pools.go | 137
-rw-r--r--  vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go | 190
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/events_windows.go | 85
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_unix.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_windows.go | 5
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_linux.go | 4
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/ascii.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term_windows.go | 31
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_linux.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/winsize.go | 18
-rw-r--r--  vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go | 11
-rw-r--r--  vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go | 2
-rw-r--r--  vendor/github.com/docker/docker/vendor.conf | 32
108 files changed, 6680 insertions, 794 deletions
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
index 6e462aeda..d0229e038 100644
--- a/vendor/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -1,65 +1,11 @@
package api
-import (
- "encoding/json"
- "encoding/pem"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/system"
- "github.com/docker/libtrust"
-)
-
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
- DefaultVersion string = "1.32"
+ DefaultVersion string = "1.34"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier string = "scratch"
)
-
-// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
-// otherwise generates a new one
-func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
- err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
- if err != nil {
- return nil, err
- }
- trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
- if err == libtrust.ErrKeyFileDoesNotExist {
- trustKey, err = libtrust.GenerateECP256PrivateKey()
- if err != nil {
- return nil, fmt.Errorf("Error generating key: %s", err)
- }
- encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
- if err != nil {
- return nil, fmt.Errorf("Error serializing key: %s", err)
- }
- if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
- return nil, fmt.Errorf("Error saving key file: %s", err)
- }
- } else if err != nil {
- return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
- }
- return trustKey, nil
-}
-
-func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
- if ext == ".json" || ext == ".jwk" {
- encoded, err = json.Marshal(key)
- if err != nil {
- return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
- }
- } else {
- pemBlock, err := key.PEMBlock()
- if err != nil {
- return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
- }
- encoded = pem.EncodeToMemory(pemBlock)
- }
- return
-}
diff --git a/vendor/github.com/docker/docker/api/names.go b/vendor/github.com/docker/docker/api/names.go
deleted file mode 100644
index f147d1f4c..000000000
--- a/vendor/github.com/docker/docker/api/names.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package api
-
-import "regexp"
-
-// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
-const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
-
-// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
-var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index 18a1263f1..4ca9ccac7 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -181,7 +181,7 @@ type ImageBuildOptions struct {
SessionID string
// TODO @jhowardmsft LCOW Support: This will require extending to include
- // `Platform string`, but is ommited for now as it's hard-coded temporarily
+ // `Platform string`, but is omitted for now as it's hard-coded temporarily
// to avoid API changes.
}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index 9fea9eb04..bb421b388 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -23,41 +23,46 @@ func (i Isolation) IsDefault() bool {
// IpcMode represents the container ipc stack.
type IpcMode string
-// IsPrivate indicates whether the container uses its private ipc stack.
+// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
func (n IpcMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
+ return n == "private"
}
-// IsHost indicates whether the container uses the host's ipc stack.
+// IsHost indicates whether the container shares the host's ipc namespace.
func (n IpcMode) IsHost() bool {
return n == "host"
}
-// IsContainer indicates whether the container uses a container's ipc stack.
+// IsShareable indicates whether the container's ipc namespace can be shared with another container.
+func (n IpcMode) IsShareable() bool {
+ return n == "shareable"
+}
+
+// IsContainer indicates whether the container uses another container's ipc namespace.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
-// Valid indicates whether the ipc stack is valid.
+// IsNone indicates whether container IpcMode is set to "none".
+func (n IpcMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsEmpty indicates whether container IpcMode is empty
+func (n IpcMode) IsEmpty() bool {
+ return n == ""
+}
+
+// Valid indicates whether the ipc mode is valid.
func (n IpcMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- case "container":
- if len(parts) != 2 || parts[1] == "" {
- return false
- }
- default:
- return false
- }
- return true
+ return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
}
// Container returns the name of the container ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
+ if len(parts) > 1 && parts[0] == "container" {
return parts[1]
}
return ""
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index beec3d494..d45d0528f 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -1,38 +1,45 @@
-// Package filters provides helper function to parse and handle command line
-// filter, used for example in docker ps or docker images commands.
+/*Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
package filters
import (
"encoding/json"
"errors"
- "fmt"
"regexp"
"strings"
"github.com/docker/docker/api/types/versions"
)
-// Args stores filter arguments as map key:{map key: bool}.
-// It contains an aggregation of the map of arguments (which are in the form
-// of -f 'key=value') based on the key, and stores values for the same key
-// in a map with string keys and boolean values.
-// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
-// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+// Args stores a mapping of keys to a set of multiple values.
type Args struct {
fields map[string]map[string]bool
}
-// NewArgs initializes a new Args struct.
-func NewArgs() Args {
- return Args{fields: map[string]map[string]bool{}}
+// KeyValuePair are used to initialize a new Args
+type KeyValuePair struct {
+ Key string
+ Value string
}
-// ParseFlag parses the argument to the filter flag. Like
-//
-// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+ return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+ args := Args{fields: map[string]map[string]bool{}}
+ for _, arg := range initialArgs {
+ args.Add(arg.Key, arg.Value)
+ }
+ return args
+}
+
+// ParseFlag parses a key=value string and adds it to an Args.
//
-// If prev map is provided, then it is appended to, and returned. By default a new
-// map is created.
+// Deprecated: Use Args.Add()
func ParseFlag(arg string, prev Args) (Args, error) {
filters := prev
if len(arg) == 0 {
@@ -53,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) {
return filters, nil
}
-// ErrBadFormat is an error returned in case of bad format for a filter.
+// ErrBadFormat is an error returned when a filter is not in the form key=value
+//
+// Deprecated: this error will be removed in a future version
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
-// ToParam packs the Args into a string for easy transport from client to server.
+// ToParam encodes the Args as args JSON encoded string
+//
+// Deprecated: use ToJSON
func ToParam(a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
- if a.Len() == 0 {
- return "", nil
+ return ToJSON(a)
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+ if len(args.fields) == 0 {
+ return []byte{}, nil
}
+ return json.Marshal(args.fields)
+}
- buf, err := json.Marshal(a.fields)
- if err != nil {
- return "", err
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
}
- return string(buf), nil
+ buf, err := json.Marshal(a)
+ return string(buf), err
}
-// ToParamWithVersion packs the Args into a string for easy transport from client to server.
-// The generated string will depend on the specified version (corresponding to the API version).
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: Use ToJSON
func ToParamWithVersion(version string, a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
- // for daemons older than v1.10, filter must be of the form map[string][]string
- var buf []byte
- var err error
if version != "" && versions.LessThan(version, "1.22") {
- buf, err = json.Marshal(convertArgsToSlice(a.fields))
- } else {
- buf, err = json.Marshal(a.fields)
- }
- if err != nil {
- return "", err
+ buf, err := json.Marshal(convertArgsToSlice(a.fields))
+ return string(buf), err
}
- return string(buf), nil
+
+ return ToJSON(a)
}
-// FromParam unpacks the filter Args.
+// FromParam decodes a JSON encoded string into Args
+//
+// Deprecated: use FromJSON
func FromParam(p string) (Args, error) {
- if len(p) == 0 {
- return NewArgs(), nil
+ return FromJSON(p)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+ args := NewArgs()
+
+ if p == "" {
+ return args, nil
}
- r := strings.NewReader(p)
- d := json.NewDecoder(r)
+ raw := []byte(p)
+ err := json.Unmarshal(raw, &args)
+ if err == nil {
+ return args, nil
+ }
- m := map[string]map[string]bool{}
- if err := d.Decode(&m); err != nil {
- r.Seek(0, 0)
-
- // Allow parsing old arguments in slice format.
- // Because other libraries might be sending them in this format.
- deprecated := map[string][]string{}
- if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
- m = deprecatedArgs(deprecated)
- } else {
- return NewArgs(), err
- }
+ // Fallback to parsing arguments in the legacy slice format
+ deprecated := map[string][]string{}
+ if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+ return args, err
+ }
+
+ args.fields = deprecatedArgs(deprecated)
+ return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON encode bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+ if len(raw) == 0 {
+ return nil
}
- return Args{m}, nil
+ return json.Unmarshal(raw, &args.fields)
}
-// Get returns the list of values associates with a field.
-// It returns a slice of strings to keep backwards compatibility with old code.
-func (filters Args) Get(field string) []string {
- values := filters.fields[field]
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+ values := args.fields[key]
if values == nil {
return make([]string, 0)
}
@@ -131,37 +159,34 @@ func (filters Args) Get(field string) []string {
return slice
}
-// Add adds a new value to a filter field.
-func (filters Args) Add(name, value string) {
- if _, ok := filters.fields[name]; ok {
- filters.fields[name][value] = true
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ args.fields[key][value] = true
} else {
- filters.fields[name] = map[string]bool{value: true}
+ args.fields[key] = map[string]bool{value: true}
}
}
-// Del removes a value from a filter field.
-func (filters Args) Del(name, value string) {
- if _, ok := filters.fields[name]; ok {
- delete(filters.fields[name], value)
- if len(filters.fields[name]) == 0 {
- delete(filters.fields, name)
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ delete(args.fields[key], value)
+ if len(args.fields[key]) == 0 {
+ delete(args.fields, key)
}
}
}
-// Len returns the number of fields in the arguments.
-func (filters Args) Len() int {
- return len(filters.fields)
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+ return len(args.fields)
}
-// MatchKVList returns true if the values for the specified field matches the ones
-// from the sources.
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'label' and sources are {'label1': '1', 'label2': '2'}
-// it returns true.
-func (filters Args) MatchKVList(field string, sources map[string]string) bool {
- fieldValues := filters.fields[field]
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+ fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
@@ -172,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return false
}
- for name2match := range fieldValues {
- testKV := strings.SplitN(name2match, "=", 2)
+ for value := range fieldValues {
+ testKV := strings.SplitN(value, "=", 2)
v, ok := sources[testKV[0]]
if !ok {
@@ -187,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return true
}
-// Match returns true if the values for the specified field matches the source string
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'image.name' and source is 'ubuntu'
-// it returns true.
-func (filters Args) Match(field, source string) bool {
- if filters.ExactMatch(field, source) {
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+ if args.ExactMatch(field, source) {
return true
}
- fieldValues := filters.fields[field]
+ fieldValues := args.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
@@ -209,9 +231,9 @@ func (filters Args) Match(field, source string) bool {
return false
}
-// ExactMatch returns true if the source matches exactly one of the filters.
-func (filters Args) ExactMatch(field, source string) bool {
- fieldValues, ok := filters.fields[field]
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+ fieldValues, ok := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
@@ -221,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool {
return fieldValues[source]
}
-// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool {
- fieldValues := filters.fields[field]
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+ fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
- if len(filters.fields[field]) != 1 {
+ if len(args.fields[key]) != 1 {
return false
}
@@ -236,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool {
return fieldValues[source]
}
-// FuzzyMatch returns true if the source matches exactly one of the filters,
-// or the source has one of the filters as a prefix.
-func (filters Args) FuzzyMatch(field, source string) bool {
- if filters.ExactMatch(field, source) {
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+ if args.ExactMatch(key, source) {
return true
}
- fieldValues := filters.fields[field]
+ fieldValues := args.fields[key]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
@@ -252,30 +275,47 @@ func (filters Args) FuzzyMatch(field, source string) bool {
return false
}
-// Include returns true if the name of the field to filter is in the filters.
-func (filters Args) Include(field string) bool {
- _, ok := filters.fields[field]
+// Include returns true if the key exists in the mapping
+//
+// Deprecated: use Contains
+func (args Args) Include(field string) bool {
+ _, ok := args.fields[field]
+ return ok
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+ _, ok := args.fields[field]
return ok
}
-// Validate ensures that all the fields in the filter are valid.
-// It returns an error as soon as it finds an invalid field.
-func (filters Args) Validate(accepted map[string]bool) error {
- for name := range filters.fields {
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+ return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate compared the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+ for name := range args.fields {
if !accepted[name] {
- return fmt.Errorf("Invalid filter '%s'", name)
+ return invalidFilter(name)
}
}
return nil
}
-// WalkValues iterates over the list of filtered values for a field.
-// It stops the iteration if it finds an error and it returns that error.
-func (filters Args) WalkValues(field string, op func(value string) error) error {
- if _, ok := filters.fields[field]; !ok {
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := args.fields[field]; !ok {
return nil
}
- for v := range filters.fields[field] {
+ for v := range args.fields[field] {
if err := op(v); err != nil {
return err
}
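
The rewrite above generalizes filters.Args into a key-to-value-set mapping, adds the Arg/NewArgs constructors and Contains, and routes the deprecated ToParam/FromParam through ToJSON/FromJSON. A minimal sketch, not part of this commit, of the new surface:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// New constructor style: seed Args with key/value pairs up front.
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("label", "tier=web"),
	)
	args.Add("name", "api")

	fmt.Println(args.Contains("label"))         // true (replaces the deprecated Include)
	fmt.Println(args.ExactMatch("name", "api")) // true
	fmt.Println(args.Get("label"))              // both label values (set order not guaranteed)

	// ToJSON/FromJSON replace the deprecated ToParam/FromParam round trip.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Len()) // 2 keys: "label" and "name"
}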
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
index 2744f85d6..b7d133cd8 100644
--- a/vendor/github.com/docker/docker/api/types/mount/mount.go
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -15,6 +15,8 @@ const (
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
+ // TypeNamedPipe is the type for mounting Windows named pipes
+ TypeNamedPipe Type = "npipe"
)
// Mount represents a mount (volume).
@@ -65,7 +67,7 @@ var Propagations = []Propagation{
type Consistency string
const (
- // ConsistencyFull guarantees bind-mount-like consistency
+ // ConsistencyFull guarantees bind mount-like consistency
ConsistencyFull Consistency = "consistent"
// ConsistencyCached mounts can cache read data and FS structure
ConsistencyCached Consistency = "cached"
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
index 9aa9702da..ed9c1168b 100644
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
}
var format string
- var parseInLocation bool
-
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
- parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
if strings.Contains(value, ".") {
if parseInLocation {
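
The comment above encodes GetTimestamp's heuristic: a value containing 'z', 'Z', '+', or three dashes carries its own zone information and is parsed absolutely; anything else is parsed in the reference time's location. A small sketch, not part of this commit:

package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Now()
	// "2018-04-25T13:26:52Z" contains a 'Z', so it parses as an absolute time;
	// "2018-04-25" has neither zone markers nor three dashes, so it parses in
	// ref's location.
	for _, v := range []string{"2018-04-25T13:26:52Z", "2018-04-25"} {
		ts, err := timetypes.GetTimestamp(v, ref)
		fmt.Println(v, "->", ts, err)
	}
}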
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go
new file mode 100644
index 000000000..f23c6874b
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go
@@ -0,0 +1,46 @@
+// Package command contains the set of Dockerfile commands.
+package command
+
+// Define constants for the command strings
+const (
+ Add = "add"
+ Arg = "arg"
+ Cmd = "cmd"
+ Copy = "copy"
+ Entrypoint = "entrypoint"
+ Env = "env"
+ Expose = "expose"
+ From = "from"
+ Healthcheck = "healthcheck"
+ Label = "label"
+ Maintainer = "maintainer"
+ Onbuild = "onbuild"
+ Run = "run"
+ Shell = "shell"
+ StopSignal = "stopsignal"
+ User = "user"
+ Volume = "volume"
+ Workdir = "workdir"
+)
+
+// Commands is list of all Dockerfile commands
+var Commands = map[string]struct{}{
+ Add: {},
+ Arg: {},
+ Cmd: {},
+ Copy: {},
+ Entrypoint: {},
+ Env: {},
+ Expose: {},
+ From: {},
+ Healthcheck: {},
+ Label: {},
+ Maintainer: {},
+ Onbuild: {},
+ Run: {},
+ Shell: {},
+ StopSignal: {},
+ User: {},
+ Volume: {},
+ Workdir: {},
+}
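
command.Commands doubles as a membership set for instruction names. A minimal sketch, not part of this commit, of a lookup against it:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/builder/dockerfile/command"
)

// isInstruction reports whether the first word of a Dockerfile line is a
// known instruction; the Commands set is keyed by the lowercase constants
// defined above.
func isInstruction(line string) bool {
	fields := strings.Fields(line)
	if len(fields) == 0 {
		return false
	}
	_, ok := command.Commands[strings.ToLower(fields[0])]
	return ok
}

func main() {
	fmt.Println(isInstruction("FROM alpine"))   // true
	fmt.Println(isInstruction("INCLUDE other")) // false
}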
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go
new file mode 100644
index 000000000..2c375b74e
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go
@@ -0,0 +1,399 @@
+package parser
+
+// line parsers are dispatch calls that parse a single unit of text into a
+// Node object which contains the whole statement. Dockerfiles have varied
+// (but not usually unique, see ONBUILD for a unique example) parsing rules
+// per-command, and these unify the processing in a way that makes it
+// manageable.
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+)
+
+var (
+ errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
+)
+
+const (
+ commandLabel = "LABEL"
+)
+
+// ignore the current argument. This will still leave a command parsed, but
+// will not incorporate the arguments into the ast.
+func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
+ return &Node{}, nil, nil
+}
+
+// used for onbuild. Could potentially be used for anything that represents a
+// statement with sub-statements.
+//
+// ONBUILD RUN foo bar -> (onbuild (run foo bar))
+//
+func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ child, err := newNodeFromLine(rest, d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Children: []*Node{child}}, nil, nil
+}
+
+// helper to parse words (i.e space delimited or quoted strings) in a statement.
+// The quotes are preserved as part of this function and they are stripped later
+// as part of processWords().
+func parseWords(rest string, d *Directive) []string {
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+ var chWidth int
+
+ for pos := 0; pos <= len(rest); pos += chWidth {
+ if pos != len(rest) {
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(rest) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+ phase = inWord // found it, fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ }
+ if ch == d.escapeToken {
+ if pos+chWidth == len(rest) {
+ continue // just skip an escape token at end of line
+ }
+ // If we're not quoted and we see an escape token, then always just
+ // add the escape token plus the char to the word, even if the char
+ // is a quote.
+ word += string(ch)
+ pos += chWidth
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ }
+ // The escape token is special except for ' quotes - can't escape anything for '
+ if ch == d.escapeToken && quote != '\'' {
+ if pos+chWidth == len(rest) {
+ phase = inWord
+ continue // just skip the escape token at end
+ }
+ pos += chWidth
+ word += string(ch)
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ }
+ }
+
+ return words
+}
+
+// parse environment like statements. Note that this does *not* handle
+// variable interpolation, which will be handled in the evaluator.
+func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
+ // This is kind of tricky because we need to support the old
+ // variant: KEY name value
+ // as well as the new one: KEY name=value ...
+ // The trigger to know which one is being used will be whether we hit
+ // a space or = first. space ==> old, "=" ==> new
+
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil
+ }
+
+ // Old format (KEY name value)
+ if !strings.Contains(words[0], "=") {
+ parts := tokenWhitespace.Split(rest, 2)
+ if len(parts) < 2 {
+ return nil, fmt.Errorf(key + " must have two arguments")
+ }
+ return newKeyValueNode(parts[0], parts[1]), nil
+ }
+
+ var rootNode *Node
+ var prevNode *Node
+ for _, word := range words {
+ if !strings.Contains(word, "=") {
+ return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
+ }
+
+ parts := strings.SplitN(word, "=", 2)
+ node := newKeyValueNode(parts[0], parts[1])
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return rootNode, nil
+}
+
+func newKeyValueNode(key, value string) *Node {
+ return &Node{
+ Value: key,
+ Next: &Node{Value: value},
+ }
+}
+
+func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
+ if rootNode == nil {
+ rootNode = node
+ }
+ if prevNode != nil {
+ prevNode.Next = node
+ }
+
+ prevNode = node.Next
+ return rootNode, prevNode
+}
+
+func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, "ENV", d)
+ return node, nil, err
+}
+
+func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, commandLabel, d)
+ return node, nil, err
+}
+
+// NodeFromLabels returns a Node for the injected labels
+func NodeFromLabels(labels map[string]string) *Node {
+ keys := []string{}
+ for key := range labels {
+ keys = append(keys, key)
+ }
+ // Sort the label to have a repeatable order
+ sort.Strings(keys)
+
+ labelPairs := []string{}
+ var rootNode *Node
+ var prevNode *Node
+ for _, key := range keys {
+ value := labels[key]
+ labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
+ // Value must be single quoted to prevent env variable expansion
+ // See https://github.com/docker/docker/issues/26027
+ node := newKeyValueNode(key, "'"+value+"'")
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return &Node{
+ Value: command.Label,
+ Original: commandLabel + " " + strings.Join(labelPairs, " "),
+ Next: rootNode,
+ }
+}
+
+// parses a statement containing one or more keyword definition(s) and/or
+// value assignments, like `name1 name2= name3="" name4=value`.
+// Note that this is a stricter format than the old format of assignment,
+// allowed by parseNameVal(), in a way that this only allows assignment of the
+// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
+// above. And the assignments `name2=` and `name3=""` are equivalent and
+// assign an empty value to the respective keywords.
+func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil, nil
+ }
+
+ var (
+ rootnode *Node
+ prevNode *Node
+ )
+ for i, word := range words {
+ node := &Node{}
+ node.Value = word
+ if i == 0 {
+ rootnode = node
+ } else {
+ prevNode.Next = node
+ }
+ prevNode = node
+ }
+
+ return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node := &Node{}
+ rootnode := node
+ prevnode := node
+ for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+ prevnode = node
+ node.Value = str
+ node.Next = &Node{}
+ node = node.Next
+ }
+
+ // XXX to get around regexp.Split *always* providing an empty string at the
+ // end due to how our loop is constructed, nil out the last node in the
+ // chain.
+ prevnode.Next = nil
+
+ return rootnode, nil, nil
+}
+
+// parseString just wraps the string in quotes and returns a working node.
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+ n := &Node{}
+ n.Value = rest
+ return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+ if !strings.HasPrefix(rest, "[") {
+ return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+ }
+
+ var myJSON []interface{}
+ if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+ return nil, nil, err
+ }
+
+ var top, prev *Node
+ for _, str := range myJSON {
+ s, ok := str.(string)
+ if !ok {
+ return nil, nil, errDockerfileNotStringArray
+ }
+
+ node := &Node{Value: s}
+ if prev == nil {
+ top = node
+ } else {
+ prev.Next = node
+ }
+ prev = node
+ }
+
+ return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, quotes the result and returns a single
+// node.
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ node = &Node{}
+ node.Value = rest
+ return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace
+// delimited string.
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ return parseStringsWhitespaceDelimited(rest, d)
+}
+
+// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
+ // Find end of first argument
+ var sep int
+ for ; sep < len(rest); sep++ {
+ if unicode.IsSpace(rune(rest[sep])) {
+ break
+ }
+ }
+ next := sep
+ for ; next < len(rest); next++ {
+ if !unicode.IsSpace(rune(rest[next])) {
+ break
+ }
+ }
+
+ if sep == 0 {
+ return nil, nil, nil
+ }
+
+ typ := rest[:sep]
+ cmd, attrs, err := parseMaybeJSON(rest[next:], d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Value: typ, Next: cmd}, attrs, err
+}
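
NodeFromLabels is the file's one exported helper: it emits a LABEL node whose values are single-quoted to block variable expansion. A small sketch, not part of this commit:

package main

import (
	"fmt"

	"github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
	node := parser.NodeFromLabels(map[string]string{
		"version":    "1.0",
		"maintainer": "baude",
	})
	// Keys come out sorted, and each value is single-quoted so the evaluator
	// will not expand $VARS inside it (see docker/docker#26027).
	fmt.Println(node.Original) // LABEL "maintainer"='baude' "version"='1.0'
}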
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
new file mode 100644
index 000000000..42a84c630
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
@@ -0,0 +1,363 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/pkg/system"
+ "github.com/pkg/errors"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+ Value string // actual content
+ Next *Node // the next item in the current sexp
+ Children []*Node // the children of this sexp
+ Attributes map[string]bool // special attributes for this node
+ Original string // original line used before parsing
+ Flags []string // only top Node should have this set
+ StartLine int // the line in the original dockerfile where the node begins
+ endLine int // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+ str := ""
+ str += node.Value
+
+ if len(node.Flags) > 0 {
+ str += fmt.Sprintf(" %q", node.Flags)
+ }
+
+ for _, n := range node.Children {
+ str += "(" + n.Dump() + ")\n"
+ }
+
+ for n := node.Next; n != nil; n = n.Next {
+ if len(n.Children) > 0 {
+ str += " " + n.Dump()
+ } else {
+ str += " " + strconv.Quote(n.Value)
+ }
+ }
+
+ return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+ node.StartLine = start
+ node.endLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+ child.lines(startLine, endLine)
+ if node.StartLine < 0 {
+ node.StartLine = startLine
+ }
+ node.endLine = endLine
+ node.Children = append(node.Children, child)
+}
+
+var (
+ dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
+ tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
+ tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+ tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
+ tokenComment = regexp.MustCompile(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// defaultPlatformToken is the platform assumed for the build if not explicitly provided
+var defaultPlatformToken = runtime.GOOS
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+ escapeToken rune // Current escape token
+ platformToken string // Current platform token
+ lineContinuationRegex *regexp.Regexp // Current line continuation regex
+ processingComplete bool // Whether we are done looking for directives
+ escapeSeen bool // Whether the escape directive has been seen
+ platformSeen bool // Whether the platform directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+ if s != "`" && s != "\\" {
+ return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+ }
+ d.escapeToken = rune(s[0])
+ d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+ return nil
+}
+
+// setPlatformToken sets the default platform for pulling images in a Dockerfile.
+func (d *Directive) setPlatformToken(s string) error {
+ s = strings.ToLower(s)
+ valid := []string{runtime.GOOS}
+ if system.LCOWSupported() {
+ valid = append(valid, "linux")
+ }
+ for _, item := range valid {
+ if s == item {
+ d.platformToken = s
+ return nil
+ }
+ }
+ return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
+}
+
+// possibleParserDirective looks for one or more parser directives '# escapeToken=<char>' and
+// '# platform=<string>'. Parser directives must precede any builder instruction
+// or other comments, and cannot be repeated.
+func (d *Directive) possibleParserDirective(line string) error {
+ if d.processingComplete {
+ return nil
+ }
+
+ tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tecMatch) != 0 {
+ for i, n := range tokenEscapeCommand.SubexpNames() {
+ if n == "escapechar" {
+ if d.escapeSeen {
+ return errors.New("only one escape parser directive can be used")
+ }
+ d.escapeSeen = true
+ return d.setEscapeToken(tecMatch[i])
+ }
+ }
+ }
+
+ // TODO @jhowardmsft LCOW Support: Eventually this check can be removed,
+ // but only recognise a platform token if running in LCOW mode.
+ if system.LCOWSupported() {
+ tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tpcMatch) != 0 {
+ for i, n := range tokenPlatformCommand.SubexpNames() {
+ if n == "platform" {
+ if d.platformSeen {
+ return errors.New("only one platform parser directive can be used")
+ }
+ d.platformSeen = true
+ return d.setPlatformToken(tpcMatch[i])
+ }
+ }
+ }
+ }
+
+ d.processingComplete = true
+ return nil
+}
+
+// NewDefaultDirective returns a new Directive with the default escapeToken token
+func NewDefaultDirective() *Directive {
+ directive := Directive{}
+ directive.setEscapeToken(string(DefaultEscapeToken))
+ directive.setPlatformToken(defaultPlatformToken)
+ return &directive
+}
+
+func init() {
+ // Dispatch Table. see line_parsers.go for the parse functions.
+ // The command is parsed and mapped to the line parser. The line parser
+ // receives the arguments but not the command, and returns an AST after
+ // reformulating the arguments according to the rules in the parser
+ // functions. Errors are propagated up by Parse() and the resulting AST can
+ // be incorporated directly into the existing AST as a next.
+ dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
+ command.Add: parseMaybeJSONToList,
+ command.Arg: parseNameOrNameVal,
+ command.Cmd: parseMaybeJSON,
+ command.Copy: parseMaybeJSONToList,
+ command.Entrypoint: parseMaybeJSON,
+ command.Env: parseEnv,
+ command.Expose: parseStringsWhitespaceDelimited,
+ command.From: parseStringsWhitespaceDelimited,
+ command.Healthcheck: parseHealthConfig,
+ command.Label: parseLabel,
+ command.Maintainer: parseString,
+ command.Onbuild: parseSubCommand,
+ command.Run: parseMaybeJSON,
+ command.Shell: parseMaybeJSON,
+ command.StopSignal: parseString,
+ command.User: parseString,
+ command.Volume: parseMaybeJSONToList,
+ command.Workdir: parseString,
+ }
+}
+
+// newNodeFromLine splits the line into parts, and dispatches to a function
+// based on the command and command arguments. A Node is created from the
+// result of the dispatch.
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
+ cmd, flags, args, err := splitCommand(line)
+ if err != nil {
+ return nil, err
+ }
+
+ fn := dispatch[cmd]
+ // Ignore invalid Dockerfile instructions
+ if fn == nil {
+ fn = parseIgnore
+ }
+ next, attrs, err := fn(args, directive)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Node{
+ Value: cmd,
+ Original: line,
+ Flags: flags,
+ Next: next,
+ Attributes: attrs,
+ }, nil
+}
+
+// Result is the result of parsing a Dockerfile
+type Result struct {
+ AST *Node
+ EscapeToken rune
+ Platform string
+ Warnings []string
+}
+
+// PrintWarnings to the writer
+func (r *Result) PrintWarnings(out io.Writer) {
+ if len(r.Warnings) == 0 {
+ return
+ }
+ fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
+}
+
+// Parse reads lines from a Reader, parses the lines into an AST and returns
+// the AST and escape token
+func Parse(rwc io.Reader) (*Result, error) {
+ d := NewDefaultDirective()
+ currentLine := 0
+ root := &Node{StartLine: -1}
+ scanner := bufio.NewScanner(rwc)
+ warnings := []string{}
+
+ var err error
+ for scanner.Scan() {
+ bytesRead := scanner.Bytes()
+ if currentLine == 0 {
+ // First line, strip the byte-order-marker if present
+ bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
+ }
+ bytesRead, err = processLine(d, bytesRead, true)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ startLine := currentLine
+ line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
+ if isEndOfLine && line == "" {
+ continue
+ }
+
+ var hasEmptyContinuationLine bool
+ for !isEndOfLine && scanner.Scan() {
+ bytesRead, err := processLine(d, scanner.Bytes(), false)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ if isComment(scanner.Bytes()) {
+ // original line was a comment (processLine strips comments)
+ continue
+ }
+ if isEmptyContinuationLine(bytesRead) {
+ hasEmptyContinuationLine = true
+ continue
+ }
+
+ continuationLine := string(bytesRead)
+ continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
+ line += continuationLine
+ }
+
+ if hasEmptyContinuationLine {
+ warning := "[WARNING]: Empty continuation line found in:\n " + line
+ warnings = append(warnings, warning)
+ }
+
+ child, err := newNodeFromLine(line, d)
+ if err != nil {
+ return nil, err
+ }
+ root.AddChild(child, startLine, currentLine)
+ }
+
+ if len(warnings) > 0 {
+ warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
+ }
+ return &Result{
+ AST: root,
+ Warnings: warnings,
+ EscapeToken: d.escapeToken,
+ Platform: d.platformToken,
+ }, nil
+}
+
+func trimComments(src []byte) []byte {
+ return tokenComment.ReplaceAll(src, []byte{})
+}
+
+func trimWhitespace(src []byte) []byte {
+ return bytes.TrimLeftFunc(src, unicode.IsSpace)
+}
+
+func isComment(line []byte) bool {
+ return tokenComment.Match(trimWhitespace(line))
+}
+
+func isEmptyContinuationLine(line []byte) bool {
+ return len(trimWhitespace(line)) == 0
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
+ if d.lineContinuationRegex.MatchString(line) {
+ line = d.lineContinuationRegex.ReplaceAllString(line, "")
+ return line, false
+ }
+ return line, true
+}
+
+// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
+// to preserve whitespace on continuation lines. Why is that done?
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
+ if stripLeftWhitespace {
+ token = trimWhitespace(token)
+ }
+ return trimComments(token), d.possibleParserDirective(string(token))
+}
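
parser.Parse is the package's public entry point: it applies parser directives, joins continuation lines, strips a leading BOM, and returns a Result whose AST children hold one node per instruction. A minimal usage sketch, not part of this commit:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
	dockerfile := `FROM alpine
RUN apk add --no-cache \
    curl
LABEL team=podman`

	result, err := parser.Parse(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}
	result.PrintWarnings(os.Stderr)

	// Each top-level child is one instruction; StartLine maps it back to the
	// source and Dump() renders it as an s-expression.
	for _, child := range result.AST.Children {
		fmt.Printf("line %d: %s\n", child.StartLine, child.Dump())
	}
}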
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go
new file mode 100644
index 000000000..171f454f6
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go
@@ -0,0 +1,118 @@
+package parser
+
+import (
+ "strings"
+ "unicode"
+)
+
+// splitCommand takes a single line of text and parses out the cmd and args,
+// which are used for dispatching to more exact parsing functions.
+func splitCommand(line string) (string, []string, string, error) {
+ var args string
+ var flags []string
+
+ // Make sure we get the same results irrespective of leading/trailing spaces
+ cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
+ cmd := strings.ToLower(cmdline[0])
+
+ if len(cmdline) == 2 {
+ var err error
+ args, flags, err = extractBuilderFlags(cmdline[1])
+ if err != nil {
+ return "", nil, "", err
+ }
+ }
+
+ return cmd, flags, strings.TrimSpace(args), nil
+}
+
+func extractBuilderFlags(line string) (string, []string, error) {
+ // Parses the BuilderFlags and returns the remaining part of the line
+
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+
+ for pos := 0; pos <= len(line); pos++ {
+ if pos != len(line) {
+ ch = rune(line[pos])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(line) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+
+ // Only keep going if the next word starts with --
+ if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+ return line[pos:], words, nil
+ }
+
+ phase = inWord // found something with "--", fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+ if word != "--" && (blankOK || len(word) > 0) {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if word == "--" {
+ return line[pos:], words, nil
+ }
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ phase = inWord
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ }
+ }
+
+ return "", words, nil
+}
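
splitCommand lowercases the instruction and peels off leading "--" builder flags before dispatch. Since it is unexported, a behavioural sketch has to sit inside the parser package itself; the example test below is hypothetical, not part of this commit:

package parser

import "fmt"

// Leading words that start with "--" are extracted as builder flags;
// everything after them is returned untouched as the args string.
func ExampleSplitCommand() {
	cmd, flags, args, _ := splitCommand("COPY --from=builder /src /dst")
	fmt.Println(cmd, flags, args)
	// Output: copy [--from=builder] /src /dst
}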
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
index ffe44bc97..9835bad5c 100644
--- a/vendor/github.com/docker/docker/client/checkpoint_list.go
+++ b/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -2,7 +2,6 @@ package client
import (
"encoding/json"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -20,10 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return checkpoints, containerNotFoundError{container}
- }
- return checkpoints, err
+ return checkpoints, wrapResponseError(err, resp, "container", container)
}
err = json.NewDecoder(resp.body).Decode(&checkpoints)
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index c4e3914b1..893124853 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -1,10 +1,6 @@
/*
Package client is a Go client for the Docker Engine API.
-The "docker" command uses this package to communicate with the daemon. It can also
-be used by your own Go applications to do anything the command-line interface does
-- running containers, pulling images, managing swarms, etc.
-
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/reference/api/
@@ -51,6 +47,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"strings"
@@ -159,7 +156,7 @@ func NewEnvClient() (*Client, error) {
// highly recommended that you set a version or your client may break if the
// server is upgraded.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
- proto, addr, basePath, err := ParseHost(host)
+ hostURL, err := ParseHostURL(host)
if err != nil {
return nil, err
}
@@ -170,7 +167,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
}
} else {
transport := new(http.Transport)
- sockets.ConfigureTransport(transport, proto, addr)
+ sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
client = &http.Client{
Transport: transport,
CheckRedirect: CheckRedirect,
@@ -188,28 +185,24 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
scheme = "https"
}
+ // TODO: store URL instead of proto/addr/basePath
return &Client{
scheme: scheme,
host: host,
- proto: proto,
- addr: addr,
- basePath: basePath,
+ proto: hostURL.Scheme,
+ addr: hostURL.Host,
+ basePath: hostURL.Path,
client: client,
version: version,
customHTTPHeaders: httpHeaders,
}, nil
}
-// Close ensures that transport.Client is closed
-// especially needed while using NewClient with *http.Client = nil
-// for example
-// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"})
+// Close the transport used by the client
func (cli *Client) Close() error {
-
if t, ok := cli.client.Transport.(*http.Transport); ok {
t.CloseIdleConnections()
}
-
return nil
}
@@ -219,37 +212,27 @@ func (cli *Client) getAPIPath(p string, query url.Values) string {
var apiPath string
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
- apiPath = cli.basePath + "/v" + v + p
+ apiPath = path.Join(cli.basePath, "/v"+v, p)
} else {
- apiPath = cli.basePath + p
- }
-
- u := &url.URL{
- Path: apiPath,
- }
- if len(query) > 0 {
- u.RawQuery = query.Encode()
+ apiPath = path.Join(cli.basePath, p)
}
- return u.String()
+ return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
}
-// ClientVersion returns the version string associated with this
-// instance of the Client. Note that this value can be changed
-// via the DOCKER_API_VERSION env var.
-// This operation doesn't acquire a mutex.
+// ClientVersion returns the API version used by this client.
func (cli *Client) ClientVersion() string {
return cli.version
}
-// NegotiateAPIVersion updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersion queries the server and updates the client version to
+// match the server's API version. Any errors are silently ignored.
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
ping, _ := cli.Ping(ctx)
cli.NegotiateAPIVersionPing(ping)
}
-// NegotiateAPIVersionPing updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
+// if the ping version is less than the default version.
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
if cli.manualOverride {
return
@@ -265,23 +248,34 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
cli.version = api.DefaultVersion
}
- // if server version is lower than the maximum version supported by the Client, downgrade
- if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+ // if server version is lower than the client version, downgrade
+ if versions.LessThan(p.APIVersion, cli.version) {
cli.version = p.APIVersion
}
}
-// DaemonHost returns the host associated with this instance of the Client.
-// This operation doesn't acquire a mutex.
+// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
return cli.host
}
-// ParseHost verifies that the given host strings is valid.
+// ParseHost parses a url string, validates the string is a host url, and returns
+// the parsed host as: protocol, address, and base path
+// Deprecated: use ParseHostURL
func ParseHost(host string) (string, string, string, error) {
+ hostURL, err := ParseHostURL(host)
+ if err != nil {
+ return "", "", "", err
+ }
+ return hostURL.Scheme, hostURL.Host, hostURL.Path, nil
+}
+
+// ParseHostURL parses a url string, validates the string is a host url, and
+// returns the parsed URL
+func ParseHostURL(host string) (*url.URL, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
- return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ return nil, fmt.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
@@ -289,16 +283,19 @@ func ParseHost(host string) (string, string, string, error) {
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
- return "", "", "", err
+ return nil, err
}
addr = parsed.Host
basePath = parsed.Path
}
- return proto, addr, basePath, nil
+ return &url.URL{
+ Scheme: proto,
+ Host: addr,
+ Path: basePath,
+ }, nil
}
-// CustomHTTPHeaders returns the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
m := make(map[string]string)
for k, v := range cli.customHTTPHeaders {
@@ -307,8 +304,7 @@ func (cli *Client) CustomHTTPHeaders() map[string]string {
return m
}
-// SetCustomHTTPHeaders updates the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// SetCustomHTTPHeaders sets the custom http headers that will be sent on every HTTP request made by the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
cli.customHTTPHeaders = headers
}
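
The client now derives proto/addr/basePath from a single parsed URL. A small usage sketch of the exported `ParseHostURL`, assuming this vendored `client` package is importable; the expected values follow the parsing rules in the hunk above:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	// "tcp" hosts get their address and base path split out by url.Parse.
    	u, err := client.ParseHostURL("tcp://127.0.0.1:2376/custom/base")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(u.Scheme) // tcp
    	fmt.Println(u.Host)   // 127.0.0.1:2376
    	fmt.Println(u.Path)   // /custom/base

    	// Strings without "://" are rejected.
    	if _, err := client.ParseHostURL("not-a-host"); err != nil {
    		fmt.Println(err) // unable to parse docker host `not-a-host`
    	}
    }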
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
index ebb6d636c..b44d6fdd7 100644
--- a/vendor/github.com/docker/docker/client/config_inspect.go
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -17,10 +16,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
}
resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return swarm.Config{}, nil, configNotFoundError{id}
- }
- return swarm.Config{}, nil, err
+ return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
index 8483ca14d..57febc9ff 100644
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -18,7 +18,7 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
index 726b5c853..e025d44f7 100644
--- a/vendor/github.com/docker/docker/client/config_remove.go
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
}
resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "config", id)
}
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
index 531d796ee..b3b16abfd 100644
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
for _, change := range options.Changes {
query.Add("changes", change)
}
- if options.Pause != true {
+ if !options.Pause {
query.Set("pause", "0")
}
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
index 6841b0b28..bd817e7fd 100644
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -45,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
if err != nil {
if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
- return response, imageNotFoundError{config.Image}
+ return response, objectNotFoundError{object: "image", id: config.Image}
}
return response, err
}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
index 17f180974..a15db14be 100644
--- a/vendor/github.com/docker/docker/client/container_inspect.go
+++ b/vendor/github.com/docker/docker/client/container_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -15,10 +14,7 @@ import (
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, err
+ return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
}
var response types.ContainerJSON
@@ -35,10 +31,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, nil, err
+ return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
index 3a79590ce..070108bf3 100644
--- a/vendor/github.com/docker/docker/client/container_remove.go
+++ b/vendor/github.com/docker/docker/client/container_remove.go
@@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "container", containerID)
}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
index fc7df9f1e..e41b728d3 100644
--- a/vendor/github.com/docker/docker/client/errors.go
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -3,6 +3,8 @@ package client
import (
"fmt"
+ "net/http"
+
"github.com/docker/docker/api/types/versions"
"github.com/pkg/errors"
)
@@ -36,95 +38,37 @@ type notFound interface {
NotFound() bool // Is the error a NotFound error
}
-// IsErrNotFound returns true if the error is caused with an
-// object (image, container, network, volume, …) is not found in the docker host.
+// IsErrNotFound returns true if the error is a NotFound error, which is returned
+// by the API when some object is not found.
func IsErrNotFound(err error) bool {
te, ok := err.(notFound)
return ok && te.NotFound()
}
-// imageNotFoundError implements an error returned when an image is not in the docker host.
-type imageNotFoundError struct {
- imageID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e imageNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of an imageNotFoundError
-func (e imageNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such image: %s", e.imageID)
-}
-
-// IsErrImageNotFound returns true if the error is caused
-// when an image is not found in the docker host.
-func IsErrImageNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// containerNotFoundError implements an error returned when a container is not in the docker host.
-type containerNotFoundError struct {
- containerID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e containerNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a containerNotFoundError
-func (e containerNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such container: %s", e.containerID)
-}
-
-// IsErrContainerNotFound returns true if the error is caused
-// when a container is not found in the docker host.
-func IsErrContainerNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// networkNotFoundError implements an error returned when a network is not in the docker host.
-type networkNotFoundError struct {
- networkID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e networkNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a networkNotFoundError
-func (e networkNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such network: %s", e.networkID)
+type objectNotFoundError struct {
+ object string
+ id string
}
-// IsErrNetworkNotFound returns true if the error is caused
-// when a network is not found in the docker host.
-func IsErrNetworkNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// volumeNotFoundError implements an error returned when a volume is not in the docker host.
-type volumeNotFoundError struct {
- volumeID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e volumeNotFoundError) NotFound() bool {
+func (e objectNotFoundError) NotFound() bool {
return true
}
-// Error returns a string representation of a volumeNotFoundError
-func (e volumeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+func (e objectNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
}
-// IsErrVolumeNotFound returns true if the error is caused
-// when a volume is not found in the docker host.
-func IsErrVolumeNotFound(err error) bool {
- return IsErrNotFound(err)
+func wrapResponseError(err error, resp serverResponse, object, id string) error {
+ switch {
+ case err == nil:
+ return nil
+ case resp.statusCode == http.StatusNotFound:
+ return objectNotFoundError{object: object, id: id}
+ case resp.statusCode == http.StatusNotImplemented:
+ return notImplementedError{message: err.Error()}
+ default:
+ return err
+ }
}
// unauthorizedError represents an authorization error in a remote registry.
@@ -144,72 +88,6 @@ func IsErrUnauthorized(err error) bool {
return ok
}
-// nodeNotFoundError implements an error returned when a node is not found.
-type nodeNotFoundError struct {
- nodeID string
-}
-
-// Error returns a string representation of a nodeNotFoundError
-func (e nodeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such node: %s", e.nodeID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e nodeNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrNodeNotFound returns true if the error is caused
-// when a node is not found.
-func IsErrNodeNotFound(err error) bool {
- _, ok := err.(nodeNotFoundError)
- return ok
-}
-
-// serviceNotFoundError implements an error returned when a service is not found.
-type serviceNotFoundError struct {
- serviceID string
-}
-
-// Error returns a string representation of a serviceNotFoundError
-func (e serviceNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such service: %s", e.serviceID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e serviceNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrServiceNotFound returns true if the error is caused
-// when a service is not found.
-func IsErrServiceNotFound(err error) bool {
- _, ok := err.(serviceNotFoundError)
- return ok
-}
-
-// taskNotFoundError implements an error returned when a task is not found.
-type taskNotFoundError struct {
- taskID string
-}
-
-// Error returns a string representation of a taskNotFoundError
-func (e taskNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such task: %s", e.taskID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e taskNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrTaskNotFound returns true if the error is caused
-// when a task is not found.
-func IsErrTaskNotFound(err error) bool {
- _, ok := err.(taskNotFoundError)
- return ok
-}
-
type pluginPermissionDenied struct {
name string
}
@@ -225,76 +103,31 @@ func IsErrPluginPermissionDenied(err error) bool {
return ok
}
-// NewVersionError returns an error if the APIVersion required
-// if less than the current supported version
-func (cli *Client) NewVersionError(APIrequired, feature string) error {
- if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
- return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
- }
- return nil
-}
-
-// secretNotFoundError implements an error returned when a secret is not found.
-type secretNotFoundError struct {
- name string
-}
-
-// Error returns a string representation of a secretNotFoundError
-func (e secretNotFoundError) Error() string {
- return fmt.Sprintf("Error: no such secret: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e secretNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrSecretNotFound returns true if the error is caused
-// when a secret is not found.
-func IsErrSecretNotFound(err error) bool {
- _, ok := err.(secretNotFoundError)
- return ok
-}
-
-// configNotFoundError implements an error returned when a config is not found.
-type configNotFoundError struct {
- name string
-}
-
-// Error returns a string representation of a configNotFoundError
-func (e configNotFoundError) Error() string {
- return fmt.Sprintf("Error: no such config: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e configNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrConfigNotFound returns true if the error is caused
-// when a config is not found.
-func IsErrConfigNotFound(err error) bool {
- _, ok := err.(configNotFoundError)
- return ok
+type notImplementedError struct {
+ message string
}
-// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
-type pluginNotFoundError struct {
- name string
+func (e notImplementedError) Error() string {
+ return e.message
}
-// NotFound indicates that this error type is of NotFound
-func (e pluginNotFoundError) NotFound() bool {
+func (e notImplementedError) NotImplemented() bool {
return true
}
-// Error returns a string representation of a pluginNotFoundError
-func (e pluginNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such plugin: %s", e.name)
+// IsErrNotImplemented returns true if the error is a NotImplemented error.
+// This is returned by the API when a requested feature has not been
+// implemented.
+func IsErrNotImplemented(err error) bool {
+ te, ok := err.(notImplementedError)
+ return ok && te.NotImplemented()
}
-// IsErrPluginNotFound returns true if the error is caused
-// when a plugin is not found in the docker host.
-func IsErrPluginNotFound(err error) bool {
- return IsErrNotFound(err)
+// NewVersionError returns an error if the APIVersion required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
}
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
index 8cf0119f3..d04cebdcf 100644
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -12,7 +12,6 @@ import (
"time"
"github.com/docker/docker/api/types"
- "github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"golang.org/x/net/context"
@@ -71,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
- deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
@@ -115,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
- config = tlsconfig.Clone(config)
+ config = tlsConfigClone(config)
config.ServerName = hostname
}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
index b3a64ce2f..1bc591990 100644
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ImageInspect{}, nil, imageNotFoundError{imageID}
- }
- return types.ImageInspect{}, nil, err
+ return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
index 6921209ee..81d6c5438 100644
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -19,12 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type
query.Set("noprune", "1")
}
+ var dels []types.ImageDeleteResponseItem
resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
if err != nil {
- return nil, err
+ return dels, wrapResponseError(err, resp, "image", imageID)
}
- var dels []types.ImageDeleteResponseItem
err = json.NewDecoder(resp.body).Decode(&dels)
ensureReaderClosed(resp)
return dels, err
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
index b0fcd5c23..5566e9255 100644
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I
query.Set("limit", fmt.Sprintf("%d", options.Limit))
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return results, err
}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
index 848c9799f..afabe6597 100644
--- a/vendor/github.com/docker/docker/client/network_inspect.go
+++ b/vendor/github.com/docker/docker/client/network_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -33,10 +32,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
}
resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return networkResource, nil, networkNotFoundError{networkID}
- }
- return networkResource, nil, err
+ return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
index 6bd674892..0811b5b51 100644
--- a/vendor/github.com/docker/docker/client/network_remove.go
+++ b/vendor/github.com/docker/docker/client/network_remove.go
@@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "network", networkID)
}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
index abf505d29..791d2c006 100644
--- a/vendor/github.com/docker/docker/client/node_inspect.go
+++ b/vendor/github.com/docker/docker/client/node_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Node{}, nil, nodeNotFoundError{nodeID}
- }
- return swarm.Node{}, nil, err
+ return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
index 3e8440f08..fed22992c 100644
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions)
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
index 0a77f3d57..adbf52feb 100644
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.
resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "node", nodeID)
}
diff --git a/vendor/github.com/docker/docker/client/parse_logs.go b/vendor/github.com/docker/docker/client/parse_logs.go
deleted file mode 100644
index e427f80a7..000000000
--- a/vendor/github.com/docker/docker/client/parse_logs.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package client
-
-// parse_logs.go contains utility helpers for getting information out of docker
-// log lines. really, it only contains ParseDetails right now. maybe in the
-// future there will be some desire to parse log messages back into a struct?
-// that would go here if we did
-
-import (
- "net/url"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// ParseLogDetails takes a details string of key value pairs in the form
-// "k=v,l=w", where the keys and values are url query escaped, and each pair
-// is separated by a comma, returns a map. returns an error if the details
-// string is not in a valid format
-// the exact form of details encoding is implemented in
-// api/server/httputils/write_log_stream.go
-func ParseLogDetails(details string) (map[string]string, error) {
- pairs := strings.Split(details, ",")
- detailsMap := make(map[string]string, len(pairs))
- for _, pair := range pairs {
- p := strings.SplitN(pair, "=", 2)
- // if there is no equals sign, we will only get 1 part back
- if len(p) != 2 {
- return nil, errors.New("invalid details format")
- }
- k, err := url.QueryUnescape(p[0])
- if err != nil {
- return nil, err
- }
- v, err := url.QueryUnescape(p[1])
- if err != nil {
- return nil, err
- }
- detailsMap[k] = v
- }
- return detailsMap, nil
-}
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
index a4c2e2c4d..0b6e450da 100644
--- a/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -1,6 +1,8 @@
package client
import (
+ "path"
+
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
@@ -8,7 +10,7 @@ import (
// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers
func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
var ping types.Ping
- req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil)
+ req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
if err != nil {
return ping, err
}
@@ -26,7 +28,5 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
}
ping.OSType = serverResp.header.Get("OSType")
}
-
- err = cli.checkResponseErr(serverResp)
- return ping, err
+ return ping, cli.checkResponseErr(serverResp)
}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
index 89f39ee2c..6a6fc18df 100644
--- a/vendor/github.com/docker/docker/client/plugin_inspect.go
+++ b/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return nil, nil, pluginNotFoundError{name}
- }
- return nil, nil, err
+ return nil, nil, wrapResponseError(err, resp, "plugin", name)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
index 3acde3b96..78dbeb8be 100644
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -23,7 +23,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
}
resp, err := cli.get(ctx, "/plugins", query, nil)
if err != nil {
- return plugins, err
+ return plugins, wrapResponseError(err, resp, "plugin", "")
}
err = json.NewDecoder(resp.body).Decode(&plugins)
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
index b017e4d34..b498c4820 100644
--- a/vendor/github.com/docker/docker/client/plugin_remove.go
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types.
resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "plugin", name)
}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 3e7d43fea..615d0b989 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -203,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
return err
}
if len(body) == 0 {
- return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+ return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
}
var ct string
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
index 9b602972b..6927ea96f 100644
--- a/vendor/github.com/docker/docker/client/secret_inspect.go
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -17,10 +16,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
}
resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return swarm.Secret{}, nil, secretNotFoundError{id}
- }
- return swarm.Secret{}, nil, err
+ return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
index 0d33ecfbc..fdee6e2e0 100644
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -18,7 +18,7 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
index c5e37af17..9b4ee71e2 100644
--- a/vendor/github.com/docker/docker/client/secret_remove.go
+++ b/vendor/github.com/docker/docker/client/secret_remove.go
@@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error {
}
resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "secret", id)
}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index a36839443..834709d1f 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -3,11 +3,12 @@ package client
import (
"encoding/json"
"fmt"
+ "strings"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@@ -85,21 +86,30 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
return response, err
}
-func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
- imageWithDigest := image
var platforms []swarm.Platform
if err != nil {
return "", nil, err
}
- imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+ imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
if len(distributionInspect.Platforms) > 0 {
platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
for _, p := range distributionInspect.Platforms {
+ // clear architecture field for arm. This is a temporary patch to address
+ // https://github.com/docker/swarmkit/issues/2294. The issue is that while
+ // image manifests report "arm" as the architecture, the node reports
+ // something like "armv7l" (includes the variant), which causes arm images
+ // to stop working with swarm mode. This patch removes the architecture
+ // constraint for arm images to ensure tasks get scheduled.
+ arch := p.Architecture
+ if strings.ToLower(arch) == "arm" {
+ arch = ""
+ }
platforms = append(platforms, swarm.Platform{
- Architecture: p.Architecture,
+ Architecture: arch,
OS: p.OS,
})
}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
index d7e051e3a..3e9699e5e 100644
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -19,10 +18,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Service{}, nil, serviceNotFoundError{serviceID}
- }
- return swarm.Service{}, nil, err
+ return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
index c29e6d407..eb3ff9739 100644
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
index a9331f92c..ad992c01d 100644
--- a/vendor/github.com/docker/docker/client/service_remove.go
+++ b/vendor/github.com/docker/docker/client/service_remove.go
@@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "service", serviceID)
}
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
index bc8058fc3..dc08cedb9 100644
--- a/vendor/github.com/docker/docker/client/task_inspect.go
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -4,10 +4,8 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
-
"golang.org/x/net/context"
)
@@ -15,10 +13,7 @@ import (
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Task{}, nil, taskNotFoundError{taskID}
- }
- return swarm.Task{}, nil, err
+ return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
index 66324da95..01bd69525 100644
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions)
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go
new file mode 100644
index 000000000..99b6be1ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package client
+
+import "crypto/tls"
+
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func tlsConfigClone(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go
index 0d5b448fe..b837b2ade 100644
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
+++ b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go
@@ -1,12 +1,12 @@
// +build go1.7,!go1.8
-package tlsconfig
+package client
import "crypto/tls"
-// Clone returns a clone of tls.Config. This function is provided for
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
+func tlsConfigClone(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
index 401ab15d3..73f6ef7b4 100644
--- a/vendor/github.com/docker/docker/client/transport.go
+++ b/vendor/github.com/docker/docker/client/transport.go
@@ -5,14 +5,6 @@ import (
"net/http"
)
-// transportFunc allows us to inject a mock transport for testing. We define it
-// here so we can detect the tlsconfig and return nil for only this type.
-type transportFunc func(*http.Request) (*http.Response, error)
-
-func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
- return tf(req)
-}
-
// resolveTLSConfig attempts to resolve the TLS configuration from the
// RoundTripper.
func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
index f3d8877df..137705065 100644
--- a/vendor/github.com/docker/docker/client/utils.go
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -24,7 +24,7 @@ func getDockerOS(serverHeader string) string {
func getFiltersQuery(f filters.Args) (url.Values, error) {
query := url.Values{}
if f.Len() > 0 {
- filterJSON, err := filters.ToParam(f)
+ filterJSON, err := filters.ToJSON(f)
if err != nil {
return query, err
}
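
`filters.ToParam` is replaced by `filters.ToJSON` throughout the client. A short sketch of what the helper above encodes into the `filters` query parameter, assuming the vendored `filters` package:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/filters"
    )

    func main() {
    	f := filters.NewArgs()
    	f.Add("label", "env=prod")
    	f.Add("dangling", "true")

    	js, err := filters.ToJSON(f)
    	if err != nil {
    		panic(err)
    	}
    	// The encoded form is what getFiltersQuery puts into the "filters"
    	// query parameter, e.g. {"dangling":{"true":true},"label":{"env=prod":true}}
    	fmt.Println(js)
    }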
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
index 3860e9b22..988934384 100644
--- a/vendor/github.com/docker/docker/client/volume_inspect.go
+++ b/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
+ "path"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -18,13 +18,17 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+	// The empty ID needs to be handled here because, with an empty ID, the
+	// request url will not contain a trailing /, which calls the volume list
+	// API instead of volume inspect
+ if volumeID == "" {
+ return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
+ }
+
var volume types.Volume
- resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return volume, nil, volumeNotFoundError{volumeID}
- }
- return volume, nil, err
+ return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
}
defer ensureReaderClosed(resp)
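
The empty-ID guard matters because of how the request path degenerates; a quick stdlib-only sketch of the two concatenation strategies:

    package main

    import (
    	"fmt"
    	"path"
    )

    func main() {
    	// With an empty ID, naive concatenation yields "/volumes/" and
    	// path.Join collapses to "/volumes"; both route to the volume list
    	// endpoint rather than volume inspect, hence the explicit
    	// objectNotFoundError above.
    	fmt.Println("/volumes/" + "")            // /volumes/
    	fmt.Println(path.Join("/volumes", ""))   // /volumes
    	fmt.Println(path.Join("/volumes", "v1")) // /volumes/v1
    }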
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
index 6c26575b4..3ffb8bcf2 100644
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool
}
resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "volume", volumeID)
}
diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md
new file mode 100644
index 000000000..9e588db25
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/README.md
@@ -0,0 +1,60 @@
+## About
+
+This directory contains a collection of scripts used to build and manage this
+repository. If there are any issues regarding the intention of a particular
+script (or even part of a certain script), please reach out to us.
+It may help us either refine our current scripts, or add on new ones
+that are appropriate for a given use case.
+
+## DinD (dind.sh)
+
+DinD is a wrapper script which allows Docker to be run inside a Docker
+container. DinD requires the container to
+be run with privileged mode enabled.
+
+## Generate Authors (generate-authors.sh)
+
+Generates AUTHORS, a file with all the names and corresponding emails of
+individual contributors. AUTHORS can be found in the home directory of
+this repository.
+
+## Make
+
+There are two make files, each with a different extension. Neither is supposed
+to be called directly; only invoke `make`. Both scripts run inside a Docker
+container.
+
+### make.ps1
+
+- The Windows native build script that uses PowerShell semantics; it is more
+limited than `hack\make.sh`, since it does not support the full set of
+operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
+does support local Windows development and Windows-to-Windows CI.
+More information can be found within `make.ps1`, written by the author, @jhowardmsft
+
+### make.sh
+
+- Referenced via `make test` when running tests on a local machine,
+or directly referenced when running tests inside a Docker development container.
+- When running on a local machine, `make test` to run all tests found in
+`test`, `test-unit`, `test-integration`, and `test-docker-py` on
+your local machine. The default timeout is set in `make.sh` to 60 minutes
+(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
+all of the tests.
+- When running inside a Docker development container, `hack/make.sh` does
+not have a single target that runs all the tests. You need to provide a
+single command line with multiple targets that performs the same thing.
+An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
+- For more information related to testing outside the scope of this README,
+refer to
+[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
+
+## Release (release.sh)
+
+Releases any bundles built by `make` on a public AWS S3 bucket.
+For information regarding configuration, please view `release.sh`.
+
+## Vendor (vendor.sh)
+
+A shell script that is a wrapper around Vndr. For information on how to use
+this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 000000000..1cea52526
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,69 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
+
+## Architecture
+
+### Master service
+
+ - Works as a funker caller
+ - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+ - Works as a funker callee
+ - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+ - Controls master and workers via `docker stack`
+ - No need to have a local daemon
+
+Typically, the master and workers are supposed to be running on a cloud environment,
+while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirements
+
+ - Docker daemon 1.13 or later
+ - Private registry for distributed execution with multiple nodes
+
+## Usage
+
+### Step 1: Prepare images
+
+ $ make build-integration-cli-on-swarm
+
+The following environment variables are known to work in this step:
+
+ - `BUILDFLAGS`
+ - `DOCKER_INCREMENTAL_BINARY`
+
+Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
+
+### Step 2: Execute tests
+
+ $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+The following environment variables are known to work in this step:
+
+ - `DOCKER_GRAPHDRIVER`
+ - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+ - `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+ - `-shuffle`: Shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+ - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
+ - `-filters-file FILE`: a file that contains `-check.f` strings. By default, the file is generated automatically.
+ - `-dry-run`: skip the actual workload
+ - `-keep-executor`: do not auto-remove executor containers, which is useful for running privileged programs on Swarm
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
new file mode 100644
index 000000000..efd6d6d04
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
@@ -0,0 +1,2 @@
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
new file mode 100644
index 000000000..4fbd470bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/env.go
@@ -0,0 +1,48 @@
+package opts
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it returns the current value using os.Getenv.
+//
+// As with ParseEnvFile and related to #16585, environment variable names
+// are not validated whatsoever; it's up to the application inside docker
+// to validate them or not.
+//
+// The only validation here is to check whether the name is empty, per #25099.
+func ValidateEnv(val string) (string, error) {
+ arr := strings.Split(val, "=")
+ if arr[0] == "" {
+ return "", errors.Errorf("invalid environment variable: %s", val)
+ }
+ if len(arr) > 1 {
+ return val, nil
+ }
+ if !doesEnvExist(val) {
+ return val, nil
+ }
+ return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func doesEnvExist(name string) bool {
+ for _, entry := range os.Environ() {
+ parts := strings.SplitN(entry, "=", 2)
+ if runtime.GOOS == "windows" {
+		// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
+ if strings.EqualFold(parts[0], name) {
+ return true
+ }
+ }
+ if parts[0] == name {
+ return true
+ }
+ }
+ return false
+}
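
A short sketch of `ValidateEnv`'s three cases (explicit value, set name resolved via `os.Getenv`, unset name passed through), assuming the vendored `opts` package is importable:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/docker/docker/opts"
    )

    func main() {
    	os.Setenv("FOO", "bar")
    	os.Unsetenv("MISSING")

    	v, _ := opts.ValidateEnv("NAME=explicit")
    	fmt.Println(v) // NAME=explicit (already has a value, passed through)

    	v, _ = opts.ValidateEnv("FOO")
    	fmt.Println(v) // FOO=bar (resolved from the current environment)

    	v, _ = opts.ValidateEnv("MISSING")
    	fmt.Println(v) // MISSING (unset, passed through unresolved)

    	if _, err := opts.ValidateEnv("=oops"); err != nil {
    		fmt.Println(err) // invalid environment variable: =oops
    	}
    }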
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 000000000..594cccf2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,165 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+ // DefaultTCPHost constant defines the default host string used by docker on Windows
+ DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+	// Note: unlike most flag validators, we don't return the mutated value here;
+	// we need to know what the user entered later (using ParseHost) to adjust for TLS.
+ return val, nil
+}
+
+// ParseHost parses and sets defaults for a Daemon host string
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
+
+// ValidateExtraHost validates that the specified string is a valid extra host and returns it.
+// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
+func ValidateExtraHost(val string) (string, error) {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return "", fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := ValidateIPAddress(arr[1]); err != nil {
+ return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return val, nil
+}
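A minimal usage sketch of the host parsing above; the values and the opts import alias are illustrative, not taken from the vendored source:

    // assumes: import "github.com/docker/docker/opts"
    host, err := opts.ParseHost(false, "tcp://0.0.0.0")
    // err == nil; host == "tcp://0.0.0.0:2375" (ParseTCPAddr fills the
    // missing port from DefaultTCPHost)
    host, err = opts.ParseHost(true, "")
    // err == nil; host == DefaultTLSHost, i.e. "tcp://<DefaultHTTPHost>:2376"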
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 000000000..611407a9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+// DefaultHost constant defines the default host string used by docker on non-Windows hosts
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 000000000..7c239e00f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 000000000..109506397
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,47 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP, it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parsable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If the stored IP is
+// nil, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
+
+// Type returns the type of the option
+func (o *IPOpt) Type() string {
+ return "ip"
+}
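A short sketch of IPOpt wiring a CLI flag to a net.IP; the variable names and values are illustrative:

    // assumes: import ("fmt"; "net"; "github.com/docker/docker/opts")
    var bindIP net.IP
    ipOpt := opts.NewIPOpt(&bindIP, "127.0.0.1") // default applied via Set
    _ = ipOpt.Set("10.0.0.2")                    // bindIP now holds 10.0.0.2
    fmt.Println(ipOpt.String())                  // prints "10.0.0.2"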
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 000000000..a86d74d60
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,327 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "path"
+ "regexp"
+ "strings"
+
+ units "github.com/docker/go-units"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^((?:[a-zA-Z0-9]|(?:[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(?:\.(?:[a-zA-Z0-9]|(?:[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ if len(*opts.values) == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%v", *opts.values)
+}
+
+// Set validates the input value if a validator is set, then appends it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the values as a map keyed by value, which avoids
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of the slice.
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+ return "list"
+}
+
+// WithValidator returns the ListOpts with validator set.
+func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
+ opts.validator = validator
+ return opts
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value if a validator is set, then adds it to the
+// internal map by splitting on the first '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+ return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ParseLink parses and validates the specified string as a link format (name:alias)
+func ParseLink(val string) (string, string, error) {
+ if val == "" {
+ return "", "", fmt.Errorf("empty string specified for links")
+ }
+ arr := strings.Split(val, ":")
+ if len(arr) > 2 {
+ return "", "", fmt.Errorf("bad format for links: %s", val)
+ }
+ if len(arr) == 1 {
+ return val, val, nil
+ }
+ // This is kept because we can actually get a HostConfig with links
+ // from an already created container and the format is not `foo:bar`
+ // but `/foo:/c1/bar`
+ if strings.HasPrefix(arr[0], "/") {
+ _, alias := path.Split(arr[1])
+ return arr[0][1:], alias, nil
+ }
+ return arr[0], arr[1], nil
+}
+
+// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
+type MemBytes int64
+
+// String returns the string format of the human readable memory bytes
+func (m *MemBytes) String() string {
+ // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
+ // We return "0" in case value is 0 here so that the default value is hidden.
+ // (Sometimes "default 0 B" is actually misleading)
+ if m.Value() != 0 {
+ return units.BytesSize(float64(m.Value()))
+ }
+ return "0"
+}
+
+// Set sets the value of the MemBytes by passing a string
+func (m *MemBytes) Set(value string) error {
+ val, err := units.RAMInBytes(value)
+ *m = MemBytes(val)
+ return err
+}
+
+// Type returns the type
+func (m *MemBytes) Type() string {
+ return "bytes"
+}
+
+// Value returns the value in int64
+func (m *MemBytes) Value() int64 {
+ return int64(*m)
+}
+
+// UnmarshalJSON is the customized unmarshaler for MemBytes
+func (m *MemBytes) UnmarshalJSON(s []byte) error {
+ if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return fmt.Errorf("invalid size: %q", s)
+ }
+ val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
+ *m = MemBytes(val)
+ return err
+}
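A brief sketch combining ListOpts with a validator and MemBytes parsing; the inputs are illustrative:

    // assumes: import "github.com/docker/docker/opts"
    hosts := opts.NewListOpts(opts.ValidateExtraHost)
    _ = hosts.Set("db:10.0.0.5")        // accepted
    err := hosts.Set("db")              // rejected: bad format for add-host

    var mem opts.MemBytes
    _ = mem.Set("128m")                 // mem.Value() == 134217728 (128 MiB)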
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 000000000..2766a43a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 000000000..98b7251a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found is that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go
new file mode 100644
index 000000000..09c68a526
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/quotedstring.go
@@ -0,0 +1,37 @@
+package opts
+
+// QuotedString is a string that may have extra quotes around the value. The
+// quotes are stripped from the value.
+type QuotedString struct {
+ value *string
+}
+
+// Set sets a new value
+func (s *QuotedString) Set(val string) error {
+ *s.value = trimQuotes(val)
+ return nil
+}
+
+// Type returns the type of the value
+func (s *QuotedString) Type() string {
+ return "string"
+}
+
+func (s *QuotedString) String() string {
+ return *s.value
+}
+
+func trimQuotes(value string) string {
+ if len(value) < 2 {
+ // too short to carry a surrounding quote pair; also guards the
+ // indexing below against empty input
+ return value
+ }
+ lastIndex := len(value) - 1
+ for _, char := range []byte{'\'', '"'} {
+ if value[0] == char && value[lastIndex] == char {
+ return value[1:lastIndex]
+ }
+ }
+ return value
+}
+
+// NewQuotedString returns a new quoted string option
+func NewQuotedString(value *string) *QuotedString {
+ return &QuotedString{value: value}
+}
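QuotedString strips at most one matching pair of surrounding quotes, as in this sketch (inputs illustrative):

    // assumes: import "github.com/docker/docker/opts"
    var v string
    qs := opts.NewQuotedString(&v)
    _ = qs.Set(`"secret"`) // v == "secret" (outer double quotes removed)
    _ = qs.Set(`'a"b'`)    // v == `a"b` (only the outer pair is stripped)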
diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go
new file mode 100644
index 000000000..4361b3ce0
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/runtime.go
@@ -0,0 +1,79 @@
+package opts
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+)
+
+// RuntimeOpt defines a map of Runtimes
+type RuntimeOpt struct {
+ name string
+ stockRuntimeName string
+ values *map[string]types.Runtime
+}
+
+// NewNamedRuntimeOpt creates a new RuntimeOpt
+func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
+ if ref == nil {
+ ref = &map[string]types.Runtime{}
+ }
+ return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
+}
+
+// Name returns the name of the RuntimeOpt in the configuration.
+func (o *RuntimeOpt) Name() string {
+ return o.name
+}
+
+// Set validates and updates the list of Runtimes
+func (o *RuntimeOpt) Set(val string) error {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.TrimSpace(parts[0])
+ parts[1] = strings.TrimSpace(parts[1])
+ if parts[0] == "" || parts[1] == "" {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.ToLower(parts[0])
+ if parts[0] == o.stockRuntimeName {
+ return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
+ }
+
+ if _, ok := (*o.values)[parts[0]]; ok {
+ return fmt.Errorf("runtime '%s' was already defined", parts[0])
+ }
+
+ (*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
+
+ return nil
+}
+
+// String returns Runtime values as a string.
+func (o *RuntimeOpt) String() string {
+ var out []string
+ for k := range *o.values {
+ out = append(out, k)
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetMap returns a map of Runtimes (name: path)
+func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
+ if o.values != nil {
+ return *o.values
+ }
+
+ return map[string]types.Runtime{}
+}
+
+// Type returns the type of the option
+func (o *RuntimeOpt) Type() string {
+ return "runtime"
+}
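A sketch of RuntimeOpt validation; the runtime names and paths are illustrative:

    // assumes: import ("github.com/docker/docker/api/types"; "github.com/docker/docker/opts")
    runtimes := map[string]types.Runtime{}
    ro := opts.NewNamedRuntimeOpt("runtimes", &runtimes, "runc")
    _ = ro.Set("kata=/usr/bin/kata-runtime") // stored as runtimes["kata"]
    err := ro.Set("runc=/usr/bin/runc")      // error: name 'runc' is reserved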
diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go
new file mode 100644
index 000000000..a2a65fcd2
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ulimit.go
@@ -0,0 +1,81 @@
+package opts
+
+import (
+ "fmt"
+
+ "github.com/docker/go-units"
+)
+
+// UlimitOpt defines a map of Ulimits
+type UlimitOpt struct {
+ values *map[string]*units.Ulimit
+}
+
+// NewUlimitOpt creates a new UlimitOpt
+func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &UlimitOpt{ref}
+}
+
+// Set validates a Ulimit and sets its name as a key in UlimitOpt
+func (o *UlimitOpt) Set(val string) error {
+ l, err := units.ParseUlimit(val)
+ if err != nil {
+ return err
+ }
+
+ (*o.values)[l.Name] = l
+
+ return nil
+}
+
+// String returns Ulimit values as a string.
+func (o *UlimitOpt) String() string {
+ var out []string
+ for _, v := range *o.values {
+ out = append(out, v.String())
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetList returns a slice of pointers to Ulimits.
+func (o *UlimitOpt) GetList() []*units.Ulimit {
+ var ulimits []*units.Ulimit
+ for _, v := range *o.values {
+ ulimits = append(ulimits, v)
+ }
+
+ return ulimits
+}
+
+// Type returns the option type
+func (o *UlimitOpt) Type() string {
+ return "ulimit"
+}
+
+// NamedUlimitOpt defines a named map of Ulimits
+type NamedUlimitOpt struct {
+ name string
+ UlimitOpt
+}
+
+var _ NamedOption = &NamedUlimitOpt{}
+
+// NewNamedUlimitOpt creates a new NamedUlimitOpt
+func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &NamedUlimitOpt{
+ name: name,
+ UlimitOpt: *NewUlimitOpt(ref),
+ }
+}
+
+// Name returns the option name
+func (o *NamedUlimitOpt) Name() string {
+ return o.name
+}
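UlimitOpt accepts go-units ulimit syntax (name=soft:hard); a minimal sketch with illustrative values:

    // assumes: import ("fmt"; "github.com/docker/docker/opts")
    uo := opts.NewUlimitOpt(nil)
    _ = uo.Set("nofile=1024:2048")
    for _, u := range uo.GetList() {
        fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048
    }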
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..aa5563756
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1237 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+type (
+ // Compression represents whether, and how, a stream is compressed.
+ Compression int
+ // WhiteoutFormat is the format of whiteout files to be unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.IDPair
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMappingsVar *idtools.IDMappings
+}
+
+// NewDefaultArchiver returns a new Archiver without any IDMappings
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents the uncompressed state.
+ Uncompressed Compression = iota
+ // Bzip2 is the bzip2 compression algorithm.
+ Bzip2
+ // Gzip is the gzip compression algorithm.
+ Gzip
+ // Xz is the xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, chdone, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all.
+ // However, this is not a problem as docker currently only generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive, the TarModifierFunc will be called with the Header and
+// a reader which will return the file's content. If the file does not exist, both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ header.Name = name
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to the archive/tar package, this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which have been removed since Go 1.9's archive/tar.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which have been removed in Go 1.9's archive/tar
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IDMappings *idtools.IDMappings
+ ChownOpts *idtools.IDPair
+
+ // For packing and unpacking whiteout files in the
+ // non standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IDMappings: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note that the original
+ // hdr may itself have been converted to a whiteout when a
+ // whiteout header was returned
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir then we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMappings.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMappings, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation inside them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappingsVar.UIDs(),
+ GIDMaps: archiver.IDMappingsVar.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a tar archive located at src into the destination dst.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappingsVar.UIDs(),
+ GIDMaps: archiver.IDMappingsVar.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMappingsVar.RootPair()
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line tool
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// IDMappings returns the IDMappings of the archiver.
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
+ return archiver.IDMappingsVar
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+ ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
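+
+// For example (illustrative): with a user namespace whose mappings start at
+// host ID 100000, a header owned by container UID/GID 0 is rewritten by
+// remapIDs to host UID/GID 100000 before being written to the archive.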
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(chdone)
+ }()
+
+ return pipeR, chdone, nil
+}
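+
+// Illustrative usage (an editor's sketch, not part of the vendored source):
+// stream the output of an external decompressor; the command shown is an
+// arbitrary example.
+//
+//	out, done, err := cmdStream(exec.Command("gzip", "-d", "-c"), compressed)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = io.Copy(dst, out)
+//	<-done // wait for the process to exit before returning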
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or is a no-op otherwise,
+// allowing callers to safely close the TempArchive multiple times.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
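+
+// Illustrative usage (an editor's sketch, not part of the vendored source):
+// spool an incoming stream to a self-deleting temporary file, then serve it
+// exactly once.
+//
+//	ta, err := NewTempArchive(src, "")
+//	if err != nil {
+//		return err
+//	}
+//	_, err = io.Copy(dst, ta) // the backing file is removed once fully read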
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 000000000..6e950e93c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,92 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
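+
+// For example: extracting an AUFS-style entry ".wh.foo" onto an overlay
+// filesystem creates a 0:0 character device named "foo" (the overlay
+// whiteout), and an entry named ".wh..wh..opq" inside a directory marks
+// that directory opaque via the "trusted.overlay.opaque" xattr.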
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 000000000..54acbf285
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 000000000..ac4a348d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,122 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts the platform-specific relative filepath p
+// to the canonical POSIX-style path used for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry adjusts the file permissions in a tar header according to
+// the platform on which the archive is created.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently Go does not fill in the major/minor numbers
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = s.Ino
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
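+
+// Worked example: the traditional Linux device number 0x0801 (sda1) splits
+// into major = (0x0801 >> 8) & 0xfff = 8 and
+// minor = (0x0801 & 0xff) | ((0x0801 >> 12) & 0xfff00) = 1.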
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 000000000..a22410c03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,79 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath converts the platform-specific relative filepath p
+// to the canonical POSIX-style path used for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace however we must
+ // check just in case
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry adjusts the file permissions in a tar header according to
+// the platform on which the archive is created.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ // perm &= 0755 // would zero out tar flags (link, regular file, directory marker, etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
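+
+// Worked example: a file that stats as 0444 on Windows is archived as
+// (0444 | 0111) & 0755 = 0555, and 0666 becomes 0755; non-permission bits
+// (directory, symlink markers, etc.) pass through unchanged.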
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.IDPair{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 000000000..d78fe6ac6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,441 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a filesystem change; it wraps the change type and path.
+// It describes changes to the files in the path with respect to the parent
+// layers. A change can be a modification, an addition, or a deletion.
+// This is used for layer diffs.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle this
+// by comparing for exact times, *or* for the same second count when either
+// a or b has exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
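+
+// For example, 12:00:00.000000000 compares equal to 12:00:00.123456789
+// (one side has zero nanoseconds), while 12:00:00.100000000 and
+// 12:00:00.200000000 compare different even though they share a second.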
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
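+
+// Illustrative usage (an editor's sketch, not part of the vendored source):
+// diff a read-write layer against a single parent layer; the paths are
+// arbitrary examples.
+//
+//	cs, err := Changes([]string{"/var/lib/layers/parent"}, "/var/lib/layers/rw")
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range cs {
+//		fmt.Println(c.String()) // e.g. "A /etc/newfile"
+//	}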
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information of a file.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+}
+
+// Changes returns the changes between info and oldInfo.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
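+
+// Illustrative usage (an editor's sketch, not part of the vendored source):
+// serialize a computed change set into a layer tar; nil ID maps mean no
+// UID/GID remapping.
+//
+//	rc, err := ExportChanges("/var/lib/layers/rw", cs, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(layerFile, rc)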
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..8e96d961f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,313 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save whole seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ if fi.Mode()&os.ModeCharDevice != 0 {
+ s := fi.Sys().(*syscall.Stat_t)
+ if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ return path, nil
+ }
+ }
+ if fi.Mode()&os.ModeDir != 0 {
+ opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+ if err != nil {
+ return "", err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ return path, nil
+ }
+ }
+
+ return "", nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 000000000..da70ed37c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..7aa1226d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,37 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs, it's not a good measure of change
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..6fd353269
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mtim() != newStat.Mtim() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 000000000..d1e036d5c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,472 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in the separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+ originalPath = strings.Replace(originalPath, "/", string(sep), -1)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath, sep) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(sep)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+ cleanedPath += string(sep)
+ }
+
+ return cleanedPath
+}
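+
+// Worked examples (with sep = '/'):
+//
+//	PreserveTrailingDotOrSeparator("/a/b", "/a/b/.", '/') // "/a/b/."
+//	PreserveTrailingDotOrSeparator("/a/b", "/a/b/", '/')  // "/a/b/"
+//	PreserveTrailingDotOrSeparator("/a/b", "/a/b", '/')   // "/a/b"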
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string, sep byte) bool {
+ return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the given path separator character.
+func hasTrailingPathSeparator(path string, sep byte) bool {
+ return len(path) > 0 && path[len(path)-1] == sep
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path, preserving a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
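+
+// Worked examples (on Unix, where os.PathSeparator is '/'):
+//
+//	SplitPathDirEntry("/a/b")  // dir "/a", base "b"
+//	SplitPathDirEntry("/a/b/") // dir "/a", base "b"
+//	SplitPathDirEntry("/a/.")  // dir "/a", base "."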
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the tar itself, but instead just creates
+// the rebase parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symlink; we will use
+ // the target file instead of the symlink if followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path, os.PathSeparator):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+// archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
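+
+// For example, rebasing with oldBase "src" and newBase "dst" rewrites the
+// entries "src/a.txt" and "src/sub/b.txt" to "dst/a.txt" and
+// "dst/sub/b.txt"; hardlink targets are rewritten the same way.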
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on
+// parameters such as whether to follow symbolic links. If followLink is
+// true, resolvedPath is the link target of any symlink file; otherwise only
+// symlinks in the directory portion of the path are resolved, and a symlink
+// file itself is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // if not following symlinks, resolve only the symlinks in the parent dir
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separators or
+ // trailing dot), so we can manually re-append them as needed.
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
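+
+// Illustrative usage of the copy entry point (an editor's sketch, not part
+// of the vendored source); the paths are arbitrary examples:
+//
+//	// Copy the *contents* of /tmp/data into /srv/data, following symlinks.
+//	if err := CopyResource("/tmp/data/.", "/srv/data", true); err != nil {
+//		return err
+//	}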
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..e305b5e4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..2b775b45c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..019facd38
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,256 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but has it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: as these operations are platform-specific, so must the slash be.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600, "")
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in it so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: as these operations are platform-specific, so must the slash be.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMappings, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to prevent further
+ // file creation in them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer, options)
+}
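
As a usage sketch, ApplyLayer above can be driven like this; the tarball name and destination directory are hypothetical:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // The layer may be compressed; ApplyLayer decompresses it internally.
        layer, err := os.Open("layer.tar.gz")
        if err != nil {
            log.Fatal(err)
        }
        defer layer.Close()

        size, err := archive.ApplyLayer("/var/lib/mystore/rootfs", layer)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("unpacked %d bytes\n", size)
    }
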
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 000000000..3448569b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..e85aac054
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 000000000..d20478a10
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque, meaning
+// readdir calls on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
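
A short sketch of how these constants combine in practice; the deleted file name is illustrative:

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // A file deleted in this layer is represented by a whiteout entry:
        deleted := "etc/passwd"
        wh := filepath.Join(filepath.Dir(deleted), archive.WhiteoutPrefix+filepath.Base(deleted))
        fmt.Println(wh) // etc/.wh.passwd

        // The AUFS metadata names derived from the constants above:
        fmt.Println(archive.WhiteoutLinkDir)   // .wh..wh.plnk
        fmt.Println(archive.WhiteoutOpaqueDir) // .wh..wh..opq
    }
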
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..b39d12c87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
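
A quick sketch exercising Generate and reading the produced archive back with archive/tar:

    package main

    import (
        "archive/tar"
        "fmt"
        "io"
        "log"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
        if err != nil {
            log.Fatal(err)
        }
        tr := tar.NewReader(r)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            // Prints: foo.txt (11 bytes), then emptyfile (0 bytes)
            fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
        }
    }
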
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 000000000..a129e654e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,298 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/scanner"
+
+ "github.com/sirupsen/logrus"
+)
+
+// PatternMatcher allows checking paths against a list of patterns
+type PatternMatcher struct {
+ patterns []*Pattern
+ exclusions bool
+}
+
+// NewPatternMatcher creates a new matcher object for the given patterns that can
+// be used later to match paths against them
+func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
+ pm := &PatternMatcher{
+ patterns: make([]*Pattern, 0, len(patterns)),
+ }
+ for _, p := range patterns {
+ // Eliminate leading and trailing whitespace.
+ p = strings.TrimSpace(p)
+ if p == "" {
+ continue
+ }
+ p = filepath.Clean(p)
+ newp := &Pattern{}
+ if p[0] == '!' {
+ if len(p) == 1 {
+ return nil, errors.New("illegal exclusion pattern: \"!\"")
+ }
+ newp.exclusion = true
+ p = p[1:]
+ pm.exclusions = true
+ }
+ // Do some syntax checking on the pattern.
+ // filepath's Match() has some really weird rules that are inconsistent
+ // so instead of trying to dup their logic, just call Match() for its
+ // error state and if there is an error in the pattern return it.
+ // If this becomes an issue we can remove this since it's really only
+ // needed in the error (syntax) case - which isn't really critical.
+ if _, err := filepath.Match(p, "."); err != nil {
+ return nil, err
+ }
+ newp.cleanedPattern = p
+ newp.dirs = strings.Split(p, string(os.PathSeparator))
+ pm.patterns = append(pm.patterns, newp)
+ }
+ return pm, nil
+}
+
+// Matches matches path against all the patterns. Matches is not safe to
+// call concurrently
+func (pm *PatternMatcher) Matches(file string) (bool, error) {
+ matched := false
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+ for _, pattern := range pm.patterns {
+ negative := false
+
+ if pattern.exclusion {
+ negative = true
+ }
+
+ match, err := pattern.match(file)
+ if err != nil {
+ return false, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) {
+ match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// Exclusions returns true if any of the patterns define exclusions
+func (pm *PatternMatcher) Exclusions() bool {
+ return pm.exclusions
+}
+
+// Patterns returns the array of active patterns
+func (pm *PatternMatcher) Patterns() []*Pattern {
+ return pm.patterns
+}
+
+// Pattern defines a single regexp used to filter file paths.
+type Pattern struct {
+ cleanedPattern string
+ dirs []string
+ regexp *regexp.Regexp
+ exclusion bool
+}
+
+func (p *Pattern) String() string {
+ return p.cleanedPattern
+}
+
+// Exclusion returns true if this pattern defines exclusion
+func (p *Pattern) Exclusion() bool {
+ return p.exclusion
+}
+
+func (p *Pattern) match(path string) (bool, error) {
+
+ if p.regexp == nil {
+ if err := p.compile(); err != nil {
+ return false, filepath.ErrBadPattern
+ }
+ }
+
+ b := p.regexp.MatchString(path)
+
+ return b, nil
+}
+
+func (p *Pattern) compile() error {
+ regStr := "^"
+ pattern := p.cleanedPattern
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ // Note that this allows for any # of /'s (even 0) because
+ // the .* will eat everything, even /'s
+ regStr += "(.*" + escSL + ")?"
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if ch == '.' || ch == '$' {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+ } else if ch == '\\' {
+ // escape the next char. Note that a trailing \ in the pattern
+ // will be left alone (but needs to be escaped)
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ re, err := regexp.Compile(regStr)
+ if err != nil {
+ return err
+ }
+
+ p.regexp = re
+ return nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ pm, err := NewPatternMatcher(patterns)
+ if err != nil {
+ return false, err
+ }
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ return pm.Matches(file)
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link must not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
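
A small sketch of the matcher in action, including an exclusion pattern; the file names and patterns below are made-up, .dockerignore-style examples:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/pkg/fileutils"
    )

    func main() {
        // Ignore everything under vendor/ except vendor/keep.go.
        patterns := []string{"vendor/**", "!vendor/keep.go"}

        for _, f := range []string{"vendor/a/b.go", "vendor/keep.go", "main.go"} {
            excluded, err := fileutils.Matches(f, patterns)
            if err != nil {
                log.Fatal(err)
            }
            // Prints: true, false, false
            fmt.Printf("%-16s excluded=%v\n", f, excluded)
        }
    }
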
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 000000000..ccd648fac
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// executing `lsof -p PID`
+func GetTotalUsedFds() int {
+ pid := os.Getpid()
+
+ cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return -1
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+
+ fds := strings.Split(outputStr, "\n")
+
+ return len(fds) - 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 000000000..0f2cb7ab9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors.
+// On Solaris these limits are per process and not systemwide
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 000000000..9e0e97bd6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// reading the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 000000000..5ec21cace
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..6cfa46483
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,317 @@
+package jsonmessage
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ gotty "github.com/Nvveen/Gotty"
+ "github.com/docker/docker/pkg/term"
+ units "github.com/docker/go-units"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message. `Code` is
+// an integer error code, `Message` is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+ // If true, don't show xB/yB
+ HideCounts bool `json:"hidecounts,omitempty"`
+ Units string `json:"units,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = 200
+ pbBox string
+ numbersBox string
+ timeLeftBox string
+ )
+
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ width = int(ws.Width)
+ }
+
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ if p.Total <= 0 {
+ switch p.Units {
+ case "":
+ current := units.HumanSize(float64(p.Current))
+ return fmt.Sprintf("%8v", current)
+ default:
+ return fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative gh#7136
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ switch {
+ case p.HideCounts:
+ case p.Units == "": // no units, use bytes
+ current := units.HumanSize(float64(p.Current))
+ total := units.HumanSize(float64(p.Total))
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+ default:
+ numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ left = (left / time.Second) * time.Second
+
+ if width > 50 {
+ timeLeftBox = " " + left.String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it comes from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+ // Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
+type termInfo interface {
+ Parse(attr string, params ...interface{}) (string, error)
+}
+
+type noTermInfo struct{} // canary used when no terminfo.
+
+func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
+ return "", fmt.Errorf("noTermInfo")
+}
+
+func clearLine(out io.Writer, ti termInfo) {
+ // el2 (clear whole line) is not exposed by terminfo.
+
+ // First clear line from beginning to cursor
+ if attr, err := ti.Parse("el1"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[1K")
+ }
+ // Then clear line from cursor to end
+ if attr, err := ti.Parse("el"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[K")
+ }
+}
+
+func cursorUp(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cuu", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dA", l)
+ }
+}
+
+func cursorDown(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cud", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dB", l)
+ }
+}
+
+// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
+ if jm.Error != nil {
+ if jm.Error.Code == 401 {
+ return fmt.Errorf("authentication is required")
+ }
+ return jm.Error
+ }
+ var endl string
+ if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out, termInfo)
+ endl = "\r"
+ fmt.Fprintf(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && termInfo != nil {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`; `isTerminal`
+// describes whether `out` is a terminal. If this is the case, it will print `\n` at the end of
+// each line and move the cursor while displaying.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]int)
+ )
+
+ var termInfo termInfo
+
+ if isTerminal {
+ term := os.Getenv("TERM")
+ if term == "" {
+ term = "vt102"
+ }
+
+ var err error
+ if termInfo, err = gotty.OpenTermInfo(term); err != nil {
+ termInfo = &noTermInfo{}
+ }
+ }
+
+ for {
+ diff := 0
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm.Aux)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(ids) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = len(ids)
+ ids[jm.ID] = line
+ if termInfo != nil {
+ fmt.Fprintf(out, "\n")
+ }
+ }
+ diff = len(ids) - line
+ if termInfo != nil {
+ cursorUp(out, termInfo, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]int)
+ }
+ err := jm.Display(out, termInfo)
+ if jm.ID != "" && termInfo != nil {
+ cursorDown(out, termInfo, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints JSON messages to the output stream
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
+ return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
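
A minimal sketch of driving DisplayJSONMessagesStream with a canned, non-terminal stream; the JSON payload is illustrative of what a daemon emits during a pull:

    package main

    import (
        "log"
        "os"
        "strings"

        "github.com/docker/docker/pkg/jsonmessage"
    )

    func main() {
        in := strings.NewReader(
            `{"id":"abc123","status":"Downloading","progressDetail":{"current":512,"total":1024}}
    {"id":"abc123","status":"Download complete"}`)

        // isTerminal=false: progress bars are suppressed and no cursor movement happens.
        if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, os.Stdout.Fd(), false, nil); err != nil {
            log.Fatal(err)
        }
    }
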
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
index c684aa81f..48b86771e 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -3,8 +3,9 @@
package mount
import (
- "golang.org/x/sys/unix"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
// #include <stdlib.h>
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
index ad9ab57f8..069ed8f2d 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -4,16 +4,23 @@ package mount
/*
#include <stdio.h>
+#include <stdlib.h>
#include <sys/mnttab.h>
*/
import "C"
import (
"fmt"
+ "unsafe"
)
func parseMountTable() ([]*Info, error) {
- mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ path := C.CString(C.MNTTAB)
+ defer C.free(unsafe.Pointer(path))
+ mode := C.CString("r")
+ defer C.free(unsafe.Pointer(mode))
+
+ mnttab := C.fopen(path, mode)
if mnttab == nil {
return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 000000000..6a111a3ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,137 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const buffer32K = 32 * 1024
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+ buffer32KPool = newBufferPoolWithSize(buffer32K)
+)
+
+// BufioReaderPool is a pool of bufio.Readers backed by sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+ pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+ return &bufferPool{
+ pool: sync.Pool{
+ New: func() interface{} { return make([]byte, size) },
+ },
+ }
+}
+
+func (bp *bufferPool) Get() []byte {
+ return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+ bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := buffer32KPool.Get()
+ written, err = io.CopyBuffer(dst, src, buf)
+ buffer32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a pool of bufio.Writers backed by sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
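
A brief sketch of borrowing a pooled reader and copying with the pooled buffer; the source file here is arbitrary:

    package main

    import (
        "log"
        "os"

        "github.com/docker/docker/pkg/pools"
    )

    func main() {
        src, err := os.Open("/etc/hostname")
        if err != nil {
            log.Fatal(err)
        }
        defer src.Close()

        // Borrow a 32K bufio.Reader from the pool and return it when done.
        br := pools.BufioReader32KPool.Get(src)
        defer pools.BufioReader32KPool.Put(br)

        // pools.Copy reuses a pooled 32K buffer instead of allocating one per copy.
        if _, err := pools.Copy(os.Stdout, br); err != nil {
            log.Fatal(err)
        }
    }
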
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 000000000..a018a203f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,190 @@
+package stdcopy
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+ // Systemerr represents errors originating from the system that make it
+ // into the multiplexed stream.
+ Systemerr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is a wrapper around io.Writer with extra customized info.
+type stdWriter struct {
+ io.Writer
+ prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// It makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(p []byte) (n int, err error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ if p == nil {
+ return 0, nil
+ }
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err = w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
+ if n < 0 {
+ n = 0
+ }
+
+ buf.Reset()
+ bufPool.Put(buf)
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
+ }
+}
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, startingBufLen)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ stream := StdType(buf[stdWriterFdIndex])
+ // Check the first byte to know where to write
+ switch stream {
+ case Stdin:
+ fallthrough
+ case Stdout:
+ // Write on stdout
+ out = dstout
+ case Stderr:
+ // Write on stderr
+ out = dsterr
+ case Systemerr:
+ // If we're on Systemerr, we won't write anywhere.
+ // NB: if this code changes later, make sure you don't try to write
+ // to outstream if Systemerr is the stream
+ out = nil
+ default:
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+stdWriterPrefixLen > bufLen {
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ // We might have an error from the source mixed up in our multiplexed
+ // stream. If we do, return it.
+ if stream == Systemerr {
+ return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if ew != nil {
+ return 0, ew
+ }
+
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + stdWriterPrefixLen
+ }
+}
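
A round-trip sketch of the multiplexing format above: two stdWriters tag frames into one buffer, and StdCopy routes them back out:

    package main

    import (
        "bytes"
        "log"
        "os"

        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        var muxed bytes.Buffer

        // Multiplex stdout- and stderr-tagged frames into a single stream.
        outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
        errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
        outW.Write([]byte("to stdout\n"))
        errW.Write([]byte("to stderr\n"))

        // Demultiplex: each frame is written to the matching destination.
        if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
            log.Fatal(err)
        }
    }
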
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
deleted file mode 100644
index 192e36788..000000000
--- a/vendor/github.com/docker/docker/pkg/system/events_windows.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package system
-
-// This file implements syscalls for Win32 events which are not implemented
-// in golang.
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var (
- procCreateEvent = modkernel32.NewProc("CreateEventW")
- procOpenEvent = modkernel32.NewProc("OpenEventW")
- procSetEvent = modkernel32.NewProc("SetEvent")
- procResetEvent = modkernel32.NewProc("ResetEvent")
- procPulseEvent = modkernel32.NewProc("PulseEvent")
-)
-
-// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
-func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) {
- namep, _ := windows.UTF16PtrFromString(name)
- var _p1 uint32
- if manualReset {
- _p1 = 1
- }
- var _p2 uint32
- if initialState {
- _p2 = 1
- }
- r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
- use(unsafe.Pointer(namep))
- handle = windows.Handle(r0)
- if handle == windows.InvalidHandle {
- err = e1
- }
- return
-}
-
-// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
-func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) {
- namep, _ := windows.UTF16PtrFromString(name)
- var _p1 uint32
- if inheritHandle {
- _p1 = 1
- }
- r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
- use(unsafe.Pointer(namep))
- handle = windows.Handle(r0)
- if handle == windows.InvalidHandle {
- err = e1
- }
- return
-}
-
-// SetEvent implements win32 SetEvent func in golang.
-func SetEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procSetEvent)
-}
-
-// ResetEvent implements win32 ResetEvent func in golang.
-func ResetEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procResetEvent)
-}
-
-// PulseEvent implements win32 PulseEvent func in golang.
-func PulseEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procPulseEvent)
-}
-
-func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) {
- r0, _, _ := proc.Call(uintptr(handle))
- if r0 != 0 {
- err = syscall.Errno(r0)
- }
- return
-}
-
-var temp unsafe.Pointer
-
-// use ensures a variable is kept alive without the GC freeing while still needed
-func use(p unsafe.Pointer) {
- temp = p
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go
new file mode 100644
index 000000000..a219895e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package system
+
+// InitLCOW does nothing since LCOW is a Windows-only feature
+func InitLCOW(experimental bool) {
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
index 019c66441..e75183726 100644
--- a/vendor/github.com/docker/docker/pkg/system/init_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -8,9 +8,10 @@ import "os"
// on build number. @jhowardmsft
var lcowSupported = false
-func init() {
+// InitLCOW sets whether LCOW is supported or not
+func InitLCOW(experimental bool) {
// LCOW initialization
- if os.Getenv("LCOW_SUPPORTED") != "" {
+ if experimental && os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true
}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
index f634a6be6..4160616f4 100644
--- a/vendor/github.com/docker/docker/pkg/system/path.go
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -1,6 +1,13 @@
package system
-import "runtime"
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containerd/continuity/pathdriver"
+)
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string {
return defaultUnixPathEnv
}
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive.
+// On Linux: this is a no-op.
+// On Windows: it verifies and manipulates the path as follows.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates the path to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+ if runtime.GOOS != "windows" || LCOWSupported() {
+ return path, nil
+ }
+
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !driver.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
deleted file mode 100644
index f3762e69d..000000000
--- a/vendor/github.com/docker/docker/pkg/system/path_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !windows
-
-package system
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
-// is the system drive. This is a no-op on Linux.
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- return path, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
deleted file mode 100644
index aab891522..000000000
--- a/vendor/github.com/docker/docker/pkg/system/path_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build windows
-
-package system
-
-import (
- "fmt"
- "path/filepath"
- "strings"
-)
-
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C: --> Fail
-// C:\ --> \
-// a --> a
-// /a --> \a
-// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("No relative path specified in %q", path)
- }
- if !filepath.IsAbs(path) || len(path) < 2 {
- return filepath.FromSlash(path), nil
- }
- if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("The specified path is not on the system drive (C:)")
- }
- return filepath.FromSlash(path[2:]), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 66bf6e28e..1939f9518 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -5,10 +5,10 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
- mode: uint32(s.Mode),
+ mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
- rdev: uint64(s.Rdev),
+ rdev: s.Rdev,
mtim: s.Mtim}, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
index f5262bccf..55873c055 100644
--- a/vendor/github.com/docker/docker/pkg/term/ascii.go
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -59,7 +59,7 @@ next:
return nil, fmt.Errorf("Unknown character: '%s'", key)
}
} else {
- codes = append(codes, byte(key[0]))
+ codes = append(codes, key[0])
}
}
return codes, nil
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
index c0332c3cd..b6819b342 100644
--- a/vendor/github.com/docker/docker/pkg/term/term_windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -23,14 +23,7 @@ type Winsize struct {
Width uint16
}
-const (
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
- enableVirtualTerminalInput = 0x0200
- enableVirtualTerminalProcessing = 0x0004
- disableNewlineAutoReturn = 0x0008
-)
-
-// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
@@ -40,8 +33,8 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
var emulateStdin, emulateStdout, emulateStderr bool
fd := os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate that enableVirtualTerminalInput is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+ // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
emulateStdin = true
} else {
vtInputSupported = true
@@ -53,21 +46,21 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
fd = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate disableNewlineAutoReturn is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStdout = true
} else {
- winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
fd = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate disableNewlineAutoReturn is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStderr = true
} else {
- winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
@@ -183,9 +176,9 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) {
return nil, err
}
- // Ignore failures, since disableNewlineAutoReturn might not be supported on this
+ // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
// version of Windows.
- winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+ winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
return state, err
}
@@ -215,7 +208,7 @@ func MakeRaw(fd uintptr) (*State, error) {
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
- mode |= enableVirtualTerminalInput
+ mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
}
err = winterm.SetConsoleMode(fd, mode)
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
index 3e25eb7a4..0f21abcc2 100644
--- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -29,6 +29,8 @@ func MakeRaw(fd uintptr) (*State, error) {
termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
termios.Cflag &^= (unix.CSIZE | unix.PARENB)
termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
index f58367fe6..85c4d9d67 100644
--- a/vendor/github.com/docker/docker/pkg/term/winsize.go
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -3,28 +3,18 @@
package term
import (
- "unsafe"
-
"golang.org/x/sys/unix"
)
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
- ws := &Winsize{}
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
- // Skipp errno = 0
- if err == 0 {
- return ws, nil
- }
+ uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
- // Skipp errno = 0
- if err == 0 {
- return nil
- }
- return err
+ uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
+ return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
}
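
A tiny sketch of the reworked GetWinsize, querying the size of the terminal attached to stdout:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        ws, err := term.GetWinsize(os.Stdout.Fd())
        if err != nil {
            log.Fatal(err) // fails when stdout is not a terminal
        }
        fmt.Printf("%d rows x %d cols\n", ws.Height, ws.Width)
    }
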
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
deleted file mode 100644
index e4dec3a5d..000000000
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build go1.8
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
- return c.Clone()
-}
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
index 1e6ea90e3..1b5179c70 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
@@ -50,7 +50,6 @@ func DefaultProfile() *types.Seccomp {
"access",
"adjtimex",
"alarm",
- "alarm",
"bind",
"brk",
"capget",
@@ -488,6 +487,7 @@ func DefaultProfile() *types.Seccomp {
"mount",
"name_to_handle_at",
"perf_event_open",
+ "quotactl",
"setdomainname",
"sethostname",
"setns",
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
index 7608b0e33..bd3c283e2 100644
--- a/vendor/github.com/docker/docker/vendor.conf
+++ b/vendor/github.com/docker/docker/vendor.conf
@@ -1,33 +1,37 @@
# the following lines are in sorted order, FYI
-github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
-github.com/Microsoft/hcsshim v0.6.2
-github.com/Microsoft/go-winio v0.4.4
-github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/Microsoft/hcsshim v0.6.5
+github.com/Microsoft/go-winio v0.4.5
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
github.com/gorilla/context v1.1
github.com/gorilla/mux v1.1
-github.com/jhowardmsft/opengcs v0.0.12
+github.com/Microsoft/opengcs v0.3.4
github.com/kr/pty 5cf931ef8f
github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.1
+github.com/sirupsen/logrus v1.0.3
github.com/tchap/go-patricia v2.2.6
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/pmezard/go-difflib v1.0.0
+github.com/gotestyourself/gotestyourself v1.1.0
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
+github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
+
# get libnetwork packages
-github.com/docker/libnetwork 248fd5ea6a67f8810da322e6e7441e8de96a9045 https://github.com/dmcgowan/libnetwork.git
+github.com/docker/libnetwork 0f08d31bf0e640e0cdc6d5161227f87602d605c5
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -62,9 +66,9 @@ github.com/pborman/uuid v1.0
google.golang.org/grpc v1.3.0
# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
-github.com/opencontainers/runc e9325d442f5979c4f79bfa9e09bdf7abb74ba03b https://github.com/dmcgowan/runc.git
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
-github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs
+github.com/opencontainers/runtime-spec v1.0.0
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
@@ -101,17 +105,15 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd fc10004571bb9b26695ccbf2dd4a83213f60b93e https://github.com/dmcgowan/containerd.git
+github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
-github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
# cluster
-github.com/docker/swarmkit 8bdecc57887ffc598b63d6433f58e0d2852112c3 https://github.com/dmcgowan/swarmkit.git
+github.com/docker/swarmkit 941a01844b89c56aa61086fecb167ab3af1de22b
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
-golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
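
For context, vendor.conf entries are whitespace-separated: import path, pinned tag or commit, and an optional third field giving an alternate repository to clone from -- which is why several lines above drop a github.com/dmcgowan fork URL while moving the pin back to an upstream commit. A throwaway parser sketch under those assumptions:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    func main() {
        f, err := os.Open("vendor.conf")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        for sc.Scan() {
            line := sc.Text()
            if i := strings.Index(line, "#"); i >= 0 {
                line = line[:i] // strip trailing comments like "# specs"
            }
            fields := strings.Fields(line)
            if len(fields) < 2 {
                continue // blank or comment-only line
            }
            pkg, ref := fields[0], fields[1]
            repo := "(upstream)" // optional third field overrides the clone URL
            if len(fields) > 2 {
                repo = fields[2]
            }
            fmt.Printf("%-50s %-12.12s %s\n", pkg, ref, repo)
        }
    }
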