author    Nalin Dahyabhai <nalin@redhat.com>  2019-10-24 10:37:22 -0400
committer Nalin Dahyabhai <nalin@redhat.com>  2019-10-29 13:35:18 -0400
commit    a4a70b4506ec4abb8b3bbc3873ee5ca015a8ed08 (patch)
tree      4e7a50576d4db83450c58054e276f33bbd2cdb3a /vendor/github.com/docker
parent    59582c55b798f0a2d086981ca9a8ddd8314fd0c2 (diff)
bump containers/image to v5.0.0, buildah to v1.11.4
Move to containers/image v5 and containers/buildah v1.11.4. Replace an equality check with a type assertion when checking for a docker.ErrUnauthorizedForCredentials error in `podman login`.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
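
The type-assertion change mentioned above is small but illustrative: in containers/image v5, docker.ErrUnauthorizedForCredentials is a struct type rather than a sentinel error value, so a comparison with == no longer matches. A minimal sketch of the pattern; the surrounding helper is hypothetical, not the actual podman login code:

package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/docker"
)

// badCredentials reports whether err is docker's "unauthorized for
// credentials" error. A type assertion matches any value of the struct
// type, where the old `err == docker.ErrUnauthorizedForCredentials`
// check would only have matched a single sentinel value.
func badCredentials(err error) bool {
	_, ok := err.(docker.ErrUnauthorizedForCredentials)
	return ok
}

func main() {
	fmt.Println(badCredentials(errors.New("other"))) // false
}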
Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r-- vendor/github.com/docker/docker/NOTICE | 2
-rw-r--r-- vendor/github.com/docker/docker/api/swagger.yaml | 39
-rw-r--r-- vendor/github.com/docker/docker/api/types/container/host_config.go | 2
-rw-r--r-- vendor/github.com/docker/docker/api/types/filters/parse.go | 2
-rw-r--r-- vendor/github.com/docker/docker/api/types/registry/registry.go | 2
-rw-r--r-- vendor/github.com/docker/docker/api/types/types.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/client.go | 3
-rw-r--r-- vendor/github.com/docker/docker/client/container_list.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/events.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/hijack.go | 2
-rw-r--r-- vendor/github.com/docker/docker/client/image_list.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/network_list.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/plugin_list.go | 1
-rw-r--r-- vendor/github.com/docker/docker/client/request.go | 17
-rw-r--r-- vendor/github.com/docker/docker/client/service_create.go | 2
-rw-r--r-- vendor/github.com/docker/docker/client/volume_list.go | 1
-rw-r--r-- vendor/github.com/docker/docker/errdefs/http_helpers.go | 33
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/README.md | 1
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/archive.go | 1294
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 261
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/archive_other.go | 7
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 115
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/archive_windows.go | 67
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/changes.go | 445
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/changes_linux.go | 286
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/changes_other.go | 97
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/changes_unix.go | 43
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/changes_windows.go | 34
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/copy.go | 480
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/copy_unix.go | 11
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/copy_windows.go | 9
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/diff.go | 260
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/example_changes.go | 97
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/time_linux.go | 16
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/time_unsupported.go | 16
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/whiteouts.go | 23
-rw-r--r-- vendor/github.com/docker/docker/pkg/archive/wrap.go | 59
-rw-r--r-- vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go | 16
-rw-r--r-- vendor/github.com/docker/docker/pkg/homedir/homedir_others.go | 6
-rw-r--r-- vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go | 9
-rw-r--r-- vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 2
-rw-r--r-- vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go | 2
-rw-r--r-- vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go | 283
-rw-r--r-- vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go | 6
-rw-r--r-- vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go | 2
-rw-r--r-- vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go | 4
-rw-r--r-- vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go | 14
-rw-r--r-- vendor/github.com/docker/docker/pkg/pools/pools.go | 1
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/filesys_unix.go (renamed from vendor/github.com/docker/docker/pkg/system/filesys.go) | 6
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/filesys_windows.go | 7
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/meminfo_linux.go | 8
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/path.go | 10
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/stat_linux.go | 3
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/stat_solaris.go | 13
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/syscall_windows.go | 19
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/utimes_linux.go | 25
-rw-r--r-- vendor/github.com/docker/docker/pkg/system/utimes_unix.go (renamed from vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go) | 14
-rw-r--r-- vendor/github.com/docker/docker/pkg/term/term_windows.go | 2
-rw-r--r-- vendor/github.com/docker/docker/pkg/term/windows/windows.go | 3
-rw-r--r-- vendor/github.com/docker/docker/profiles/seccomp/default.json | 4
-rw-r--r-- vendor/github.com/docker/docker/profiles/seccomp/seccomp.go | 12
-rw-r--r-- vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go | 4
62 files changed, 4030 insertions(+), 177 deletions(-)
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
index 0c74e15b0..58b19b6d1 100644
--- a/vendor/github.com/docker/docker/NOTICE
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 38ca5329e..cc2451f03 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -3287,7 +3287,7 @@ definitions:
<p><br /></p>
- - "ingress" makes the target port accessible on on every node,
+ - "ingress" makes the target port accessible on every node,
regardless of whether there is a task for the service running on
that node or not.
- "host" bypasses the routing mesh and publish the port directly on
@@ -3305,8 +3305,8 @@ definitions:
type: "object"
properties:
Mode:
- description: "The mode of resolution to use for internal load balancing
- between tasks."
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
type: "string"
enum:
- "vip"
@@ -4873,7 +4873,7 @@ paths:
Note that a running container can be _paused_. The `Running` and `Paused`
booleans are not mutually exclusive:
- When pausing a container (on Linux), the cgroups freezer is used to suspend
+ When pausing a container (on Linux), the freezer cgroup is used to suspend
all processes in the container. Freezing the process requires the process to
be running. As a result, paused containers are both `Running` _and_ `Paused`.
@@ -5543,8 +5543,6 @@ paths:
description: "no error"
304:
description: "container already started"
- schema:
- $ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
@@ -5576,8 +5574,6 @@ paths:
description: "no error"
304:
description: "container already stopped"
- schema:
- $ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
@@ -5768,9 +5764,9 @@ paths:
post:
summary: "Pause a container"
description: |
- Use the cgroups freezer to suspend all processes in a container.
+ Use the freezer cgroup to suspend all processes in a container.
- Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
+ Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
operationId: "ContainerPause"
responses:
204:
@@ -6493,10 +6489,11 @@ paths:
type: "string"
- name: "networkmode"
in: "query"
- description: "Sets the networking mode for the run commands during
- build. Supported standard values are: `bridge`, `host`, `none`, and
- `container:<name|id>`. Any other value is taken as a custom network's
- name to which this container should connect to."
+ description: |
+ Sets the networking mode for the run commands during build. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect to.
type: "string"
- name: "Content-type"
in: "header"
@@ -9585,17 +9582,19 @@ paths:
type: "integer"
- name: "registryAuthFrom"
in: "query"
+ description: |
+ If the `X-Registry-Auth` header is not specified, this parameter
+ indicates where to find registry authorization credentials.
type: "string"
- description: "If the X-Registry-Auth header is not specified, this
- parameter indicates where to find registry authorization credentials. The
- valid values are `spec` and `previous-spec`."
+ enum: ["spec", "previous-spec"]
default: "spec"
- name: "rollback"
in: "query"
+ description: |
+ Set to this parameter to `previous` to cause a server-side rollback
+ to the previous service spec. The supplied spec will be ignored in
+ this case.
type: "string"
- description: "Set to this parameter to `previous` to cause a
- server-side rollback to the previous service spec. The supplied spec will be
- ignored in this case."
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index 654c88106..209f33eb9 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -7,7 +7,7 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
// CgroupnsMode represents the cgroup namespace mode of the container
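
The import change above (and the matching ones in registry.go and service_create.go below) adds an explicit name to imports whose package name does not match the final path element. A small sketch of why the alias helps readability:

package main

import (
	"fmt"

	units "github.com/docker/go-units" // package is "units", path ends in "go-units"
)

func main() {
	// Without the alias, the identifier actually bound ("units") is
	// invisible at the import site; with it, the name is explicit.
	fmt.Println(units.HumanSize(1024 * 1024)) // "1.049MB"
}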
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index 1f75403f7..2e24e769c 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -57,7 +57,7 @@ func ToJSON(a Args) (string, error) {
// then the encoded format will use an older legacy format where the values are a
// list of strings, instead of a set.
//
-// Deprecated: Use ToJSON
+// Deprecated: do not use in any new code; use ToJSON instead
func ToParamWithVersion(version string, a Args) (string, error) {
if a.Len() == 0 {
return "", nil
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
index 8789ad3b3..53e47084c 100644
--- a/vendor/github.com/docker/docker/api/types/registry/registry.go
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"net"
- "github.com/opencontainers/image-spec/specs-go/v1"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ServiceConfig stores daemon registry services configuration.
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index b13d9c4c7..4cf9a95ff 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -39,6 +39,7 @@ type ImageInspect struct {
Author string
Config *container.Config
Architecture string
+ Variant string `json:",omitempty"`
Os string
OsVersion string `json:",omitempty"`
Size int64
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index b63d4d6d4..0649a69cc 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -252,7 +252,8 @@ func (cli *Client) DaemonHost() string {
// HTTPClient returns a copy of the HTTP client bound to the server
func (cli *Client) HTTPClient() *http.Client {
- return &*cli.client
+ c := *cli.client
+ return &c
}
// ParseHostURL parses a url string, validates the string is a host url, and
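
The HTTPClient fix above matters because &*cli.client simplifies to cli.client itself: dereference-then-address yields a pointer to the same http.Client, not a copy. A minimal standalone demonstration of the corrected pattern:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// clone returns a pointer to fresh storage holding a shallow copy of
// *src; returning &*src would alias the original value (the old bug).
func clone(src *http.Client) *http.Client {
	c := *src
	return &c
}

func main() {
	orig := &http.Client{}
	cp := clone(orig)
	cp.Timeout = 30 * time.Second
	fmt.Println(orig.Timeout) // still 0s: the copy is independent
}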
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
index 1e7a63a9c..c099d80e2 100644
--- a/vendor/github.com/docker/docker/client/container_list.go
+++ b/vendor/github.com/docker/docker/client/container_list.go
@@ -35,6 +35,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis
}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
if err != nil {
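
The //lint:ignore lines added here and at the other list call sites suppress staticcheck's SA1019 deprecation warning for ToParamWithVersion, which must remain for compatibility with older daemons. The directive takes a check name plus a mandatory reason and must sit directly above the offending statement; a sketch of the placement with a hypothetical wrapper:

package client

import "github.com/docker/docker/api/types/filters"

// encodeFilters wraps the deprecated call so the suppression lives in
// exactly one place.
func encodeFilters(version string, args filters.Args) (string, error) {
	//lint:ignore SA1019 old daemons require the versioned legacy encoding
	return filters.ToParamWithVersion(version, args)
}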
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
index 6e5653895..f347cadf1 100644
--- a/vendor/github.com/docker/docker/client/events.go
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -90,6 +90,7 @@ func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url
}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
index e9c9a752f..e77084af6 100644
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -87,6 +87,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto
// Server hijacks the connection, error 'connection closed' expected
resp, err := clientconn.Do(req)
+
+ //lint:ignore SA1019 for connecting to old (pre go1.8) daemons
if err != httputil.ErrPersistEOF {
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
index 4fa8c006b..a5bc4b095 100644
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -24,6 +24,7 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions
}
}
if optionFilters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
if err != nil {
return images, err
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
index 7130c1364..8ca7eb612 100644
--- a/vendor/github.com/docker/docker/client/network_list.go
+++ b/vendor/github.com/docker/docker/client/network_list.go
@@ -13,6 +13,7 @@ import (
func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
index 8285cecd6..a51c930e6 100644
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -15,6 +15,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
query := url.Values{}
if filter.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
if err != nil {
return plugins, err
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 3078335e2..144c41636 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -50,15 +50,6 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b
return cli.sendRequest(ctx, "POST", path, query, body, headers)
}
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- body, headers, err := encodeBody(obj, headers)
- if err != nil {
- return serverResponse{}, err
- }
- return cli.sendRequest(ctx, "PUT", path, query, body, headers)
-}
-
// putRaw sends an http request to the docker API using the method PUT.
func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
return cli.sendRequest(ctx, "PUT", path, query, body, headers)
@@ -178,7 +169,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
// this is localised - for example in French the error would be
// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
- err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
+ // Checks if client is running with elevated privileges
+ if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
+ err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
+ } else {
+ f.Close()
+ err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
+ }
}
return serverResp, errors.Wrap(err, "error during connect")
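
The Windows branch added above distinguishes "client not elevated" from "daemon not running" by probing a capability that ordinarily requires administrator rights: opening a raw physical-drive handle. A hedged standalone sketch of that probe; note that in the vendored hunk the f.Close() sits in the error branch, where f is nil, which is harmless since (*os.File).Close tolerates a nil receiver, merely misplaced:

package main

import (
	"fmt"
	"os"
)

// isElevated reports whether the process can open a raw drive handle,
// which on Windows normally succeeds only with elevated privileges.
func isElevated() bool {
	f, err := os.Open(`\\.\PHYSICALDRIVE0`)
	if err != nil {
		return false
	}
	f.Close()
	return true
}

func main() {
	fmt.Println(isElevated())
}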
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index 620fc6cff..56bfe55b7 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -9,7 +9,7 @@ import (
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
index 2380d5638..d68fc2b98 100644
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -15,6 +15,7 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet
query := url.Values{}
if filter.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
if err != nil {
return volumes, err
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
index ac9bf6d33..07552f1cc 100644
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net/http"
+ containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
@@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int {
if statusCode != http.StatusInternalServerError {
return statusCode
}
+ statusCode = statusCodeFromContainerdError(err)
+ if statusCode != http.StatusInternalServerError {
+ return statusCode
+ }
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
@@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int {
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
- if e, ok := err.(causer); ok {
- return statusCodeFromGRPCError(e.Cause())
- }
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
@@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int {
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
- default:
- if e, ok := err.(causer); ok {
- return statusCodeFromDistributionError(e.Cause())
- }
}
return http.StatusInternalServerError
}
+
+// statusCodeFromContainerdError returns status code for containerd errors when
+// consumed directly (not through gRPC)
+func statusCodeFromContainerdError(err error) int {
+ switch {
+ case containerderrors.IsInvalidArgument(err):
+ return http.StatusBadRequest
+ case containerderrors.IsNotFound(err):
+ return http.StatusNotFound
+ case containerderrors.IsAlreadyExists(err):
+ return http.StatusConflict
+ case containerderrors.IsFailedPrecondition(err):
+ return http.StatusPreconditionFailed
+ case containerderrors.IsUnavailable(err):
+ return http.StatusServiceUnavailable
+ case containerderrors.IsNotImplemented(err):
+ return http.StatusNotImplemented
+ default:
+ return http.StatusInternalServerError
+ }
+}
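
With the containerd branch added above, GetHTTPErrorStatusCode consults three error families in order: docker's own errdefs, containerd's errdefs, then distribution registry error codes. A minimal sketch of the effect, assuming the vendored module paths resolve:

package main

import (
	"fmt"

	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// containerd's typed sentinel still maps to 404 after wrapping,
	// because containerderrors.IsNotFound compares errors.Cause(err).
	err := errors.Wrap(containerderrors.ErrNotFound, "image lookup")
	fmt.Println(errdefs.GetHTTPErrorStatusCode(err)) // 404
}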
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..cbcf86532
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1294 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+var unpigzPath string
+
+func init() {
+ if path, err := exec.LookPath("unpigz"); err != nil {
+ logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
+ } else {
+ logrus.Debugf("Using unpigz binary found at path %s", path)
+ unpigzPath = path
+ }
+}
+
+type (
+ // Compression is the state represents if compressed or not.
+ Compression int
+ // WhiteoutFormat is the format of whiteouts unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver implements the Archiver interface and allows the reuse of most utility functions of
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping *idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents the uncompressed.
+ Uncompressed Compression = iota
+ // Bzip2 is bzip2 compression algorithm.
+ Bzip2
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Xz is xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ defer rdr.Close()
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ if unpigzPath == "" {
+ return gzip.NewReader(buf)
+ }
+
+ disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
+ if disablePigzEnv != "" {
+ if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
+ return nil, err
+ } else if disablePigz {
+ return gzip.NewReader(buf)
+ }
+ }
+
+ return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(readBuf, func() error {
+ cancel()
+ return readBuf.Close()
+ })
+}
+
+// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ gzReader, err := gzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ xzReader, err := xzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses the dest with specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the files content. If the file does not exist both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ header.Name = name
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to archive pkg this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which have been deleted since Go 1.9 archive/tar.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ hdr.Name = canonicalTarName(name, fi.IsDir())
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IdentityMapping *idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+
+ // For packing and unpacking whiteout files in the
+ // non standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IdentityMapping: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+//path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) string {
+ name = CanonicalTarNameForPath(name)
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links too
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ //check whether the file is overlayfs whiteout
+ //if yes, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ //handle re-mapping container ID mappings back to host ID mappings before
+ //writing tar headers/files. We skip whiteout files because they were written
+ //by the kernel and already have proper ownership relative to the host
+ if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note the original
+ // hdr may have been updated to be a whiteout with returning
+ // a whiteout header
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for sycalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this is an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this is an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP || err == syscall.EPERM {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ // EPERM occurs if modifying xattrs is not allowed. This can
+ // happen when running in userns with restrictions (ChromeOS).
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and its a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // Its not a dir then so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMapping.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exits we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMapping, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untar a file from path to a destination, src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMapping.RootPair()
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMapping, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
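+
+// Illustrative usage (a sketch, assuming an Archiver value constructed
+// elsewhere in this package; the paths are hypothetical):
+//
+//	// Recursively copy a directory tree, creating dst owned by the
+//	// archiver's remapped root UID/GID.
+//	if err := archiver.CopyWithTar("/srv/data", "/mnt/dest"); err != nil {
+//		return err
+//	}
+//	// Copy a single regular file, preserving its metadata.
+//	if err := archiver.CopyFileWithTar("/etc/hosts", "/mnt/dest/hosts"); err != nil {
+//		return err
+//	}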
+
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
+ return archiver.IDMapping
+}
+
+func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
+ ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(done)
+ }()
+
+ return ioutils.NewReadCloserWrapper(pipeR, func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ }), nil
+}
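+
+// Illustrative usage (a sketch): streaming a compressed reader through an
+// external decompressor, for instance the xz binary:
+//
+//	out, err := cmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
+//	if err != nil {
+//		return nil, err
+//	}
+//	// Closing the returned ReadCloser waits for the process to exit.
+//	defer out.Close()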
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
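+
+// Illustrative usage (a sketch; src is any io.Reader, and an empty dir
+// selects the default temp directory):
+//
+//	ta, err := NewTempArchive(src, "")
+//	if err != nil {
+//		return err
+//	}
+//	// Reading ta to completion closes it and deletes the backing file.
+//	if _, err := io.Copy(dst, ta); err != nil {
+//		return err
+//	}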
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
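+// Read reads from the temporary archive, closing the archive and deleting
+// the underlying file once its contents have been fully consumed.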
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 000000000..0601f7b0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,261 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containerd/continuity/fs"
+ "github.com/docker/docker/pkg/system"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{inUserNS: inUserNS}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct {
+ inUserNS bool
+}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ if err != nil {
+ if c.inUserNS {
+ if err = replaceDirWithOverlayOpaque(dir); err != nil {
+ return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir)
+ }
+ } else {
+ return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
+ }
+ }
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ if c.inUserNS {
+ // Ubuntu and a few distros support overlayfs in userns.
+ //
+ // Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
+ // we can still create 0,0 char device using mknodChar0Overlay().
+ //
+ // NOTE: we don't need this hack for the containerd snapshotter+unpack model.
+ if err := mknodChar0Overlay(originalPath); err != nil {
+ return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath)
+ }
+ } else {
+ return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+ }
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
+// This function can be used for creating 0,0 char device in userns on Ubuntu.
+//
+// Steps:
+// * Mkdir lower,upper,merged,work
+// * Create lower/dummy
+// * Mount overlayfs
+// * Unlink merged/dummy
+// * Unmount overlayfs
+// * Make sure a 0,0 char device is created as upper/dummy
+// * Rename upper/dummy to cleansedOriginalPath
+func mknodChar0Overlay(cleansedOriginalPath string) error {
+ dir := filepath.Dir(cleansedOriginalPath)
+ tmp, err := ioutil.TempDir(dir, "mc0o")
+ if err != nil {
+ return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+ }
+ defer os.RemoveAll(tmp)
+ lower := filepath.Join(tmp, "l")
+ upper := filepath.Join(tmp, "u")
+ work := filepath.Join(tmp, "w")
+ merged := filepath.Join(tmp, "m")
+ for _, s := range []string{lower, upper, work, merged} {
+ if err := os.MkdirAll(s, 0700); err != nil {
+ return errors.Wrapf(err, "failed to mkdir %s", s)
+ }
+ }
+ dummyBase := "d"
+ lowerDummy := filepath.Join(lower, dummyBase)
+ if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
+ return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
+ }
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+ // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+ return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ }
+ mergedDummy := filepath.Join(merged, dummyBase)
+ if err := os.Remove(mergedDummy); err != nil {
+ syscall.Unmount(merged, 0)
+ return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
+ }
+ if err := syscall.Unmount(merged, 0); err != nil {
+ return errors.Wrapf(err, "failed to unmount %s", merged)
+ }
+ upperDummy := filepath.Join(upper, dummyBase)
+ if err := isChar0(upperDummy); err != nil {
+ return err
+ }
+ if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
+ return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
+ }
+ return nil
+}
+
+func isChar0(path string) error {
+ osStat, err := os.Stat(path)
+ if err != nil {
+ return errors.Wrapf(err, "failed to stat %s", path)
+ }
+ st, ok := osStat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return errors.Errorf("got unsupported stat for %s", path)
+ }
+ if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
+ return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
+ }
+ if st.Rdev != 0 {
+ return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
+ }
+ return nil
+}
+
+// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
+// xattr. The contents of the directory are preserved.
+func replaceDirWithOverlayOpaque(path string) error {
+ if path == "/" {
+ return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"")
+ }
+ dir := filepath.Dir(path)
+ tmp, err := ioutil.TempDir(dir, "rdwoo")
+ if err != nil {
+ return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+ }
+ defer os.RemoveAll(tmp)
+ // newPath is a new empty directory crafted with trusted.overlay.opaque xattr.
+ // we copy the content of path into newPath, remove path, and rename newPath to path.
+ newPath, err := createDirWithOverlayOpaque(tmp)
+ if err != nil {
+ return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp)
+ }
+ if err := fs.CopyDir(newPath, path); err != nil {
+ return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path)
+ }
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ return os.Rename(newPath, path)
+}
+
+// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr,
+// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu.
+func createDirWithOverlayOpaque(tmp string) (string, error) {
+ lower := filepath.Join(tmp, "l")
+ upper := filepath.Join(tmp, "u")
+ work := filepath.Join(tmp, "w")
+ merged := filepath.Join(tmp, "m")
+ for _, s := range []string{lower, upper, work, merged} {
+ if err := os.MkdirAll(s, 0700); err != nil {
+ return "", errors.Wrapf(err, "failed to mkdir %s", s)
+ }
+ }
+ dummyBase := "d"
+ lowerDummy := filepath.Join(lower, dummyBase)
+ if err := os.MkdirAll(lowerDummy, 0700); err != nil {
+ return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
+ }
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+ // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+ return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ }
+ mergedDummy := filepath.Join(merged, dummyBase)
+ if err := os.Remove(mergedDummy); err != nil {
+ syscall.Unmount(merged, 0)
+ return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy)
+ }
+ // upperDummy becomes a 0,0-char device file here
+ if err := os.Mkdir(mergedDummy, 0700); err != nil {
+ syscall.Unmount(merged, 0)
+ return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy)
+ }
+ // upperDummy becomes a directory with trusted.overlay.opaque xattr
+ // (but can't be verified in userns)
+ if err := syscall.Unmount(merged, 0); err != nil {
+ return "", errors.Wrapf(err, "failed to unmount %s", merged)
+ }
+ upperDummy := filepath.Join(upper, dummyBase)
+ return upperDummy, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 000000000..65a73354c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 000000000..d62633603
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,115 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts a platform-specific filepath to the
+// canonical POSIX-style path for tar archival. p is a relative path.
+func CanonicalTarNameForPath(p string) string {
+ return p // already unix-style
+}
+
+// chmodTarEntry adjusts the file permissions in a tar header according to
+// the platform on which the archive is created.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently Go does not fill in the major/minor numbers
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = s.Ino
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 000000000..ae6b89fd7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,67 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath converts a platform-specific filepath to the
+// canonical POSIX-style path for tar archival. p is a relative path.
+func CanonicalTarNameForPath(p string) string {
+ return filepath.ToSlash(p)
+}
+
+// chmodTarEntry adjusts the file permissions in a tar header according to
+// the platform on which the archive is created.
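+// For example, chmodTarEntry(0644) returns 0755; non-permission mode bits
+// such as os.ModeDir pass through unchanged.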
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.Identity{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 000000000..aedb91b03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,445 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change can be a modification, addition, or deletion.
+// This is used for layer diffs.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar doesn't have sub-second mtime precision. The Go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// Losing sub-second precision is problematic when we apply changes via tar
+// files. We handle this by comparing for exact equality, *or* the same
+// second count with either a or b having exactly 0 nanoseconds.
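+//
+// For example, 12:00:00.000000000 and 12:00:00.123456789 compare as equal
+// (same second, one side has zero nanoseconds), while 12:00:00.100000000
+// and 12:00:00.200000000 do not.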
+func sameFsTime(a, b time.Time) bool {
+ return a.Equal(b) ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information of a file.
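+// For example, on Unix, info.LookUp("/a/b") descends through child "a" to
+// child "b" and returns that *FileInfo, or nil if either element is missing.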
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime.
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes compares the file tree rooted at info against oldInfo and
+// returns the resulting list of changes.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
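+
+// Illustrative usage (a sketch; the layer paths are hypothetical):
+//
+//	changes, err := ChangesDirs("/layers/new", "/layers/old")
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range changes {
+//		fmt.Println(c.String()) // e.g. "A /etc/hosts" or "C /etc"
+//	}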
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
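+
+// Illustrative usage (a sketch; layerFile is a hypothetical destination,
+// and nil ID maps mean no UID/GID remapping):
+//
+//	changes, err := ChangesDirs(newDir, oldDir)
+//	if err != nil {
+//		return err
+//	}
+//	rc, err := ExportChanges(newDir, changes, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(layerFile, rc)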
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..f8792b3d4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save several seconds on
+// large images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 000000000..ba744741c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..06217b716
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,43 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size or modification time for dirs, it's not a good
+ // measure of change. See https://github.com/moby/moby/issues/9874
+ // for a description of the issue with modification time, and
+ // https://github.com/moby/moby/pull/11422 for the change.
+ // (Note that in the Windows implementation of this function,
+ // modification time IS taken as a change). See
+ // https://github.com/moby/moby/pull/37982 for more information.
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..9906685e4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,34 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Note there is slight difference between the Linux and Windows
+ // implementations here. Due to https://github.com/moby/moby/issues/9874,
+ // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+ // consider a change to the directory time as a change. Windows on NTFS
+ // does. See https://github.com/moby/moby/pull/37982 for more information.
+
+ if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 000000000..57fddac07
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,480 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in the separator, then another is not added.
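+//
+// For example (with sep '/'):
+//
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/.", '/') returns "/foo/."
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/", '/') returns "/foo/"
+//	PreserveTrailingDotOrSeparator("/foo", "/foo", '/') returns "/foo"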
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+ originalPath = strings.Replace(originalPath, "/", string(sep), -1)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath, sep) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(sep)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+ cleanedPath += string(sep)
+ }
+
+ return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string, sep byte) bool {
+ return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string, sep byte) bool {
+ return len(path) > 0 && path[len(path)-1] == sep
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
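+//
+// For example, on Unix:
+//
+//	SplitPathDirEntry("/a/b/") returns ("/a", "b")
+//	SplitPathDirEntry("/a/b/.") returns ("/a/b", ".")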
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // normalize the file path and then evaluate the symlink;
+ // we will use the target file instead of the symlink if
+ // followLink is set
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path, os.PathSeparator):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive, replacing the
+// first occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // srcContent tar stream, as served by TarWithOptions(), is
+ // definitely in PAX format, but tar.Next() mistakenly guesses it
+ // as USTAR, which creates a problem: if the newBase is >100
+ // characters long, WriteHeader() returns an error like
+ // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+ //
+ // To fix, set the format to PAX here. See docker/for-linux issue #484.
+ hdr.Format = tar.FormatPAX
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content, whose entries are
+// described by srcInfo, to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on
+// whether symlinks should be followed. If followLink is true, resolvedPath
+// will be the target of any symlink file; otherwise only symlinks in the
+// parent directory are resolved, and a symlink file itself is returned
+// without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following symlinks, resolve any symlinks in the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or
+ // dot), so we can manually re-append them where needed.
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
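A minimal usage sketch of the entry point added above; the paths are assumptions, and per the documented contract the source must exist and the destination's parent directory must already be present.

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Copy /tmp/src to /tmp/dst, following symlinks in the source path.
        // Internally this tars the source (TarResource) and extracts it at
        // the destination via CopyTo/PrepareArchiveCopy.
        if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
            log.Fatal(err)
        }
    }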
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..3958364f5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..a878d1bac
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..27897e6ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,260 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: since these operations are platform specific, the slash must be too.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+ // We don't want this directory, but we need the files in it so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: since these operations are platform specific, the slash must be too.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMapping, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to keep further
+ // file creation inside them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows
+// the caller to skip calling DecompressStream.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ if runtime.GOOS != "windows" {
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask)
+ }
+
+ if decompress {
+ decompLayer, err := DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompLayer.Close()
+ layer = decompLayer
+ }
+ return UnpackLayer(dest, layer, options)
+}
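A hedged sketch of driving the layer functions above; the file name and target directory are assumptions. ApplyLayer detects and decompresses the stream itself before unpacking.

    package main

    import (
        "log"
        "os"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        f, err := os.Open("layer.tar.gz") // hypothetical compressed layer diff
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Unpack into /srv/rootfs, honoring whiteout entries along the way.
        size, err := archive.ApplyLayer("/srv/rootfs", f)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("applied %d bytes of layer content", size)
    }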
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 000000000..495db809e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ // Assign to the outer oldDir rather than shadowing it with :=, so
+ // ChangesDirs below sees the temp directory.
+ var err error
+ oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 000000000..797143ee8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = (1 << 30) - 2
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
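Since timeToTimespec is unexported, the following is an in-package sketch only (the helper name is made up); it shows the sentinel a zero time.Time maps to, which tells utimensat(2) to leave that timestamp untouched.

    package archive

    import (
        "fmt"
        "time"
    )

    // printOmit demonstrates the UTIME_OMIT mapping for a zero time.
    func printOmit() {
        ts := timeToTimespec(time.Time{})
        fmt.Println(ts.Sec, ts.Nsec) // 0 1073741822, i.e. (1 << 30) - 2
    }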
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..f58bf227f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 000000000..4c072a87e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to files
+// in other layers. Normally these should not go into exported archives and
+// all changed hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque, meaning
+// readdir calls to this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
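A small sketch of how a consumer such as UnpackLayer above interprets these constants; the entry name is made up.

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        base := ".wh.app.log" // basename of a hypothetical tar entry
        if strings.HasPrefix(base, archive.WhiteoutPrefix) &&
            !strings.HasPrefix(base, archive.WhiteoutMetaPrefix) {
            // A plain whiteout: the named file was deleted in this layer.
            fmt.Println("delete:", base[len(archive.WhiteoutPrefix):]) // delete: app.log
        }
        fmt.Println(archive.WhiteoutLinkDir)   // .wh..wh.plnk
        fmt.Println(archive.WhiteoutOpaqueDir) // .wh..wh..opq
    }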
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..85435694c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with empty
+// content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
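A runnable variant of the call shape from the doc comment above; the output file name is an assumption.

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Two entries: foo.txt with content, emptyfile with empty content.
        rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
        if err != nil {
            log.Fatal(err)
        }
        out, err := os.Create("out.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer out.Close()
        if _, err := io.Copy(out, rdr); err != nil {
            log.Fatal(err)
        }
    }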
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
index 47ecd0c09..5e6310fdc 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
@@ -5,24 +5,8 @@ import (
"os"
"path/filepath"
"strings"
-
- "github.com/docker/docker/pkg/idtools"
)
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
- uid := os.Getuid()
- usr, err := idtools.LookupUID(uid)
- if err != nil {
- return "", err
- }
- return usr.Home, nil
-}
-
// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
index f0a363ded..67ab9e9b3 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -6,12 +6,6 @@ import (
"errors"
)
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
- return "", errors.New("homedir.GetStatic() is not supported on this system")
-}
-
// GetRuntimeDir is unsupported on non-linux system.
func GetRuntimeDir() (string, error) {
return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
index d85e12448..284e8be7c 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
@@ -4,8 +4,7 @@ package homedir // import "github.com/docker/docker/pkg/homedir"
import (
"os"
-
- "github.com/opencontainers/runc/libcontainer/user"
+ "os/user"
)
// Key returns the env var name for the user's home dir based on
@@ -17,11 +16,13 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
+// If compiling statically, ensure the osusergo build tag is used.
+// If you need to do NSS lookups, do not compile statically.
func Get() string {
home := os.Getenv(Key())
if home == "" {
- if u, err := user.CurrentUser(); err == nil {
- return u.Home
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
}
return home
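With GetStatic gone, the comment above is the operative guidance: a statically linked binary that calls homedir.Get() should be built with the osusergo tag so user.Current() uses the pure-Go lookup instead of glibc. A typical invocation (the package path is a placeholder):

    go build -tags osusergo ./cmd/mytool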
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
index fb239743a..3981ff64d 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -59,7 +59,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting
paths = append(paths, dirPath)
}
}
- if err := system.MkdirAll(path, mode, ""); err != nil {
+ if err := system.MkdirAll(path, mode); err != nil {
return err
}
} else {
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
index 4ae38a1b1..35ede0fff 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -11,7 +11,7 @@ import (
// Ownership is handled elsewhere, but in the future could be support here
// too.
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
- if err := system.MkdirAll(path, mode, ""); err != nil {
+ if err := system.MkdirAll(path, mode); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..6d6640898
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,283 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/term"
+ units "github.com/docker/go-units"
+ "github.com/morikuni/aec"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message: `Code` is an integer error
+// code and `Message` is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+ // If true, don't show xB/yB
+ HideCounts bool `json:"hidecounts,omitempty"`
+ Units string `json:"units,omitempty"`
+ nowFunc func() time.Time
+ winSize int
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = p.width()
+ pbBox string
+ numbersBox string
+ timeLeftBox string
+ )
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ if p.Total <= 0 {
+ switch p.Units {
+ case "":
+ current := units.HumanSize(float64(p.Current))
+ return fmt.Sprintf("%8v", current)
+ default:
+ return fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative (gh#7136)
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ switch {
+ case p.HideCounts:
+ case p.Units == "": // no units, use bytes
+ current := units.HumanSize(float64(p.Current))
+ total := units.HumanSize(float64(p.Total))
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+ default:
+ numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := p.now().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ left = (left / time.Second) * time.Second
+
+ if width > 50 {
+ timeLeftBox = " " + left.String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// shim for testing
+func (p *JSONProgress) now() time.Time {
+ if p.nowFunc == nil {
+ p.nowFunc = func() time.Time {
+ return time.Now().UTC()
+ }
+ }
+ return p.nowFunc()
+}
+
+// shim for testing
+func (p *JSONProgress) width() int {
+ if p.winSize != 0 {
+ return p.winSize
+ }
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ return int(ws.Width)
+ }
+ return 200
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it came from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+ // Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+func clearLine(out io.Writer) {
+ eraseMode := aec.EraseModes.All
+ cl := aec.EraseLine(eraseMode)
+ fmt.Fprint(out, cl)
+}
+
+func cursorUp(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Up(l))
+}
+
+func cursorDown(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Down(l))
+}
+
+// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the
+// entire current line when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+ if jm.Error != nil {
+ if jm.Error.Code == 401 {
+ return fmt.Errorf("authentication is required")
+ }
+ return jm.Error
+ }
+ var endl string
+ if isTerminal && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out)
+ endl = "\r"
+ fmt.Fprint(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && isTerminal {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream displays a json message stream from `in` to `out`;
+// `isTerminal` describes whether `out` is a terminal. If so, it will print `\n` at the end of
+// each line and move the cursor while displaying.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]uint)
+ )
+
+ for {
+ var diff uint
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(id) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = uint(len(ids))
+ ids[jm.ID] = line
+ if isTerminal {
+ fmt.Fprintf(out, "\n")
+ }
+ }
+ diff = uint(len(ids)) - line
+ if isTerminal {
+ cursorUp(out, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]uint)
+ }
+ err := jm.Display(out, isTerminal)
+ if jm.ID != "" && isTerminal {
+ cursorDown(out, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output stream
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error {
+ return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
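A hedged sketch of wiring the stream displayer above to a progress stream read from stdin; term.GetFdInfo comes from the vendored pkg/term package.

    package main

    import (
        "log"
        "os"

        "github.com/docker/docker/pkg/jsonmessage"
        "github.com/docker/docker/pkg/term"
    )

    func main() {
        // When stdout is a terminal, progress lines are redrawn in place
        // using the cursor helpers defined above.
        fd, isTerm := term.GetFdInfo(os.Stdout)
        if err := jsonmessage.DisplayJSONMessagesStream(os.Stdin, os.Stdout, fd, isTerm, nil); err != nil {
            log.Fatal(err)
        }
    }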
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
index 3d68800eb..f3f46055e 100644
--- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
@@ -587,9 +587,6 @@ var (
// Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf
"mestorf",
- // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky
- "minsky",
-
// Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani
"mirzakhani",
@@ -737,9 +734,6 @@ var (
// Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence
"spence",
- // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman
- "stallman",
-
// Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker
"stonebraker",
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
index 6e599eebc..6a302dcee 100644
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
@@ -9,7 +9,7 @@ import (
"os/exec"
"strings"
- "github.com/mattn/go-shellwords"
+ shellwords "github.com/mattn/go-shellwords"
)
// GetKernelVersion gets the current kernel version.
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
index b7b15a1fd..a04763872 100644
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
@@ -36,7 +36,7 @@ func GetKernelVersion() (*VersionInfo, error) {
}
KVI.kvi = blex
- // Important - docker.exe MUST be manifested for this API to return
+ // Important - dockerd.exe MUST be manifested for this API to return
// the correct information.
dwVersion, err := windows.GetVersion()
if err != nil {
@@ -44,7 +44,7 @@ func GetKernelVersion() (*VersionInfo, error) {
}
KVI.major = int(dwVersion & 0xFF)
- KVI.minor = int((dwVersion & 0XFF00) >> 8)
+ KVI.minor = int((dwVersion & 0xFF00) >> 8)
KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
return KVI, nil
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
deleted file mode 100644
index b2139b60e..000000000
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package kernel // import "github.com/docker/docker/pkg/parsers/kernel"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func uname() (*unix.Utsname, error) {
- uts := &unix.Utsname{}
-
- if err := unix.Uname(uts); err != nil {
- return nil, err
- }
- return uts, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
index 46339c282..3b978fd3b 100644
--- a/vendor/github.com/docker/docker/pkg/pools/pools.go
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -72,6 +72,7 @@ func (bp *bufferPool) Get() []byte {
}
func (bp *bufferPool) Put(b []byte) {
+ //nolint:staticcheck // TODO changing this to a pointer makes tests fail. Investigate if we should change or not (otherwise remove this TODO)
bp.pool.Put(b)
}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
index adeb16305..dcee3e9f9 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
@@ -8,14 +8,14 @@ import (
"path/filepath"
)
-// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
- return MkdirAll(path, perm, sddl)
+ return os.MkdirAll(path, perm)
}
// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all dir created.
-func MkdirAll(path string, perm os.FileMode, sddl string) error {
+func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
index 3049ff38a..7cebd6efc 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -26,9 +26,10 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
return mkdirall(path, true, sddl)
}
-// MkdirAll implementation that is volume path aware for Windows.
-func MkdirAll(path string, _ os.FileMode, sddl string) error {
- return mkdirall(path, false, sddl)
+// MkdirAll implementation that is volume path aware for Windows. It can be used
+// as a drop-in replacement for os.MkdirAll()
+func MkdirAll(path string, _ os.FileMode) error {
+ return mkdirall(path, false, "")
}
// mkdirall is a custom version of os.MkdirAll modified for use on Windows
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
index d79e8b076..cd060eff2 100644
--- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -7,7 +7,7 @@ import (
"strconv"
"strings"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
@@ -27,6 +27,7 @@ func ReadMemInfo() (*MemInfo, error) {
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
scanner := bufio.NewScanner(reader)
+ memAvailable := int64(-1)
for scanner.Scan() {
// Expected format: ["MemTotal:", "1234", "kB"]
parts := strings.Fields(scanner.Text())
@@ -48,6 +49,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo.MemTotal = bytes
case "MemFree:":
meminfo.MemFree = bytes
+ case "MemAvailable:":
+ memAvailable = bytes
case "SwapTotal:":
meminfo.SwapTotal = bytes
case "SwapFree:":
@@ -55,6 +58,9 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
}
}
+ if memAvailable != -1 {
+ meminfo.MemFree = memAvailable
+ }
// Handle errors that may have occurred during the reading of the file.
if err := scanner.Err(); err != nil {
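The effect of the MemAvailable change is visible through the exported entry point; a minimal Linux-only sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        info, err := system.ReadMemInfo()
        if err != nil {
            log.Fatal(err)
        }
        // On kernels that report MemAvailable, MemFree now carries that
        // value, a better estimate of memory usable without swapping.
        fmt.Println("free bytes:", info.MemFree)
    }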
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
index a3d957afa..64e892289 100644
--- a/vendor/github.com/docker/docker/pkg/system/path.go
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -5,8 +5,6 @@ import (
"path/filepath"
"runtime"
"strings"
-
- "github.com/containerd/continuity/pathdriver"
)
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -27,6 +25,12 @@ func DefaultPathEnv(os string) string {
}
+// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter
+// actually uses in order to avoid system depending on containerd/continuity.
+type PathVerifier interface {
+ IsAbs(string) bool
+}
+
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive.
// On Linux: this is a no-op.
@@ -42,7 +46,7 @@ func DefaultPathEnv(os string) string {
// a --> a
// /a --> \a
// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
if runtime.GOOS != "windows" || LCOWSupported() {
return path, nil
}
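Any type with an IsAbs(string) bool method now satisfies PathVerifier; a sketch using path/filepath (the verifier type is made up):

    package main

    import (
        "fmt"
        "log"
        "path/filepath"

        "github.com/docker/docker/pkg/system"
    )

    // fpVerifier satisfies PathVerifier via path/filepath.
    type fpVerifier struct{}

    func (fpVerifier) IsAbs(p string) bool { return filepath.IsAbs(p) }

    func main() {
        // On non-Windows platforms this is a no-op and returns the path unchanged.
        p, err := system.CheckSystemDriveAndRemoveDriveLetter("/a/b", fpVerifier{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(p)
    }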
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 98c9eb18d..17d5d131a 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
- rdev: s.Rdev,
+ // the type is 32bit on mips
+ rdev: uint64(s.Rdev), // nolint: unconvert
mtim: s.Mtim}, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
deleted file mode 100644
index 756b92d1e..000000000
--- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import "syscall"
-
-// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
-func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
- mode: uint32(s.Mode),
- uid: s.Uid,
- gid: s.Gid,
- rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
index 4ae92fa6c..eb19f9c85 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -55,7 +55,6 @@ var (
ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procGetVersionExW = modkernel32.NewProc("GetVersionExW")
- procGetProductInfo = modkernel32.NewProc("GetProductInfo")
procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW")
procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl")
)
@@ -85,7 +84,7 @@ type osVersionInfoEx struct {
}
// GetOSVersion gets the operating system version on Windows. Note that
-// docker.exe must be manifested to get the correct version information.
+// dockerd.exe must be manifested to get the correct version information.
func GetOSVersion() OSVersion {
var err error
osv := OSVersion{}
@@ -118,22 +117,6 @@ func IsWindowsClient() bool {
return osviex.ProductType == verNTWorkstation
}
-// IsIoTCore returns true if the currently running image is based off of
-// Windows 10 IoT Core.
-// @engine maintainers - this function should not be removed or modified as it
-// is used to enforce licensing restrictions on Windows.
-func IsIoTCore() bool {
- var returnedProductType uint32
- r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
- if r1 == 0 {
- logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
- return false
- }
- const productIoTUAP = 0x0000007B
- const productIoTUAPCommercial = 0x00000083
- return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
-}
-
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
deleted file mode 100644
index 0afe85458..000000000
--- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
- atFdCwd := unix.AT_FDCWD
-
- var _path *byte
- _path, err := unix.BytePtrFromString(path)
- if err != nil {
- return err
- }
- if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
index ed1b9fad5..61ba8c474 100644
--- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
@@ -1,8 +1,9 @@
+// +build linux freebsd
+
package system // import "github.com/docker/docker/pkg/system"
import (
"syscall"
- "unsafe"
"golang.org/x/sys/unix"
)
@@ -10,13 +11,12 @@ import (
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
- var _path *byte
- _path, err := unix.BytePtrFromString(path)
- if err != nil {
- return err
+ uts := []unix.Timespec{
+ unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])),
+ unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])),
}
-
- if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil && err != unix.ENOSYS {
return err
}
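A usage sketch for the rewritten helper; the symlink path is an assumption. Because AT_SYMLINK_NOFOLLOW is passed, the timestamps of the link itself change, not those of its target.

    package main

    import (
        "log"
        "syscall"
        "time"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        now := time.Now().UnixNano()
        ts := []syscall.Timespec{
            syscall.NsecToTimespec(now), // access time
            syscall.NsecToTimespec(now), // modification time
        }
        if err := system.LUtimesNano("/tmp/mylink", ts); err != nil {
            log.Fatal(err)
        }
    }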
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
index a3c3db131..6e83b59e9 100644
--- a/vendor/github.com/docker/docker/pkg/term/term_windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -7,7 +7,7 @@ import (
"syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
"github.com/Azure/go-ansiterm/winterm"
- "github.com/docker/docker/pkg/term/windows"
+ windowsconsole "github.com/docker/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
index 3e5593ca6..7e8f265d4 100644
--- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -1,3 +1,4 @@
+// +build windows
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
@@ -9,7 +10,7 @@ import (
"os"
"sync"
- "github.com/Azure/go-ansiterm"
+ ansiterm "github.com/Azure/go-ansiterm"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json
index 250a03e13..71ac412df 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/default.json
+++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json
@@ -167,6 +167,9 @@
"ioprio_set",
"io_setup",
"io_submit",
+ "io_uring_enter",
+ "io_uring_register",
+ "io_uring_setup",
"ipc",
"kill",
"lchown",
@@ -314,6 +317,7 @@
"sigaltstack",
"signalfd",
"signalfd4",
+ "sigprocmask",
"sigreturn",
"socket",
"socketcall",
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
index 9f222a6ee..12721a120 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
@@ -143,20 +143,18 @@ Loop:
}
if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args))
- }
-
- for _, n := range call.Names {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args))
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args))
+ } else {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args))
}
}
return newConfig, nil
}
-func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall {
+func createSpecsSyscall(names []string, action types.Action, args []*types.Arg) specs.LinuxSyscall {
newCall := specs.LinuxSyscall{
- Names: []string{name},
+ Names: names,
Action: specs.LinuxSeccompAction(action),
}
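A sketch of the OCI rule this now produces when a profile entry carries several names, using the io_uring syscalls added to the default profile above (the action is illustrative):

    package main

    import (
        "fmt"

        specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
        // One LinuxSyscall rule covering all three names, rather than
        // three single-name rules as the old code generated.
        rule := specs.LinuxSyscall{
            Names:  []string{"io_uring_enter", "io_uring_register", "io_uring_setup"},
            Action: specs.ActAllow,
        }
        fmt.Printf("%+v\n", rule)
    }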
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
index 53333f43e..16148b408 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
@@ -160,6 +160,9 @@ func DefaultProfile() *types.Seccomp {
"ioprio_set",
"io_setup",
"io_submit",
+ "io_uring_enter",
+ "io_uring_register",
+ "io_uring_setup",
"ipc",
"kill",
"lchown",
@@ -307,6 +310,7 @@ func DefaultProfile() *types.Seccomp {
"sigaltstack",
"signalfd",
"signalfd4",
+ "sigprocmask",
"sigreturn",
"socket",
"socketcall",