Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/distribution/blobs.go  2
-rw-r--r--  vendor/github.com/docker/distribution/context/context.go  85
-rw-r--r--  vendor/github.com/docker/distribution/context/doc.go  89
-rw-r--r--  vendor/github.com/docker/distribution/context/http.go  366
-rw-r--r--  vendor/github.com/docker/distribution/context/logger.go  116
-rw-r--r--  vendor/github.com/docker/distribution/context/trace.go  104
-rw-r--r--  vendor/github.com/docker/distribution/context/util.go  24
-rw-r--r--  vendor/github.com/docker/distribution/context/version.go  16
-rw-r--r--  vendor/github.com/docker/distribution/manifests.go  2
-rw-r--r--  vendor/github.com/docker/distribution/registry.go  3
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/blob_writer.go  2
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/repository.go  2
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go  30
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go  2
-rw-r--r--  vendor/github.com/docker/distribution/tags.go  2
-rw-r--r--  vendor/github.com/docker/distribution/uuid/uuid.go  126
-rw-r--r--  vendor/github.com/docker/docker/contrib/README.md  4
-rw-r--r--  vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c  10
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/acct.c  16
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/exit32.s  7
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/ns.c  63
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/raw.c  14
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/setgid.c  11
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/setuid.c  11
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/socket.c  30
-rw-r--r--  vendor/github.com/docker/docker/contrib/syscall-test/userns.c  63
-rw-r--r--  vendor/github.com/docker/docker/hack/README.md  60
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md  69
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf  2
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/README.md  1
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go  1237
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go  92
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_other.go  7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go  114
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_windows.go  79
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes.go  441
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_linux.go  313
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_other.go  97
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_unix.go  37
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_windows.go  30
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy.go  472
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_unix.go  11
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_windows.go  9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff.go  256
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/example_changes.go  97
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_linux.go  16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_unsupported.go  16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts.go  23
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap.go  59
-rw-r--r--  vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go  317
-rw-r--r--  vendor/github.com/docker/docker/profiles/seccomp/generate.go  32
-rw-r--r--  vendor/github.com/docker/go-connections/nat/nat.go  2
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go  1
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config.go  26
-rw-r--r--  vendor/github.com/docker/spdystream/connection.go  2
-rw-r--r--  vendor/github.com/docker/spdystream/handlers.go  4
56 files changed, 283 insertions, 4839 deletions
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
index 01d309029..145b07853 100644
--- a/vendor/github.com/docker/distribution/blobs.go
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -1,13 +1,13 @@
package distribution
import (
+ "context"
"errors"
"fmt"
"io"
"net/http"
"time"
- "github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
)
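This hunk is representative of the whole commit: the vendored `github.com/docker/distribution/context` import is dropped in favor of the standard library's `context`. Since the old package's `Context` type simply embedded `golang.org/x/net/context.Context` (see the deleted `context.go` below), call sites migrate mechanically. A minimal sketch of consuming code after the change, assuming the `BlobStatter` interface defined in `blobs.go`:

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// statBlob compiles against the post-migration API: distribution's
// interfaces now take the standard library context.Context directly.
func statBlob(ctx context.Context, bs distribution.BlobStatter, dgst digest.Digest) error {
	desc, err := bs.Stat(ctx, dgst)
	if err != nil {
		return err
	}
	fmt.Printf("blob %s is %d bytes\n", desc.Digest, desc.Size)
	return nil
}
```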
diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go
deleted file mode 100644
index 23cbf5b54..000000000
--- a/vendor/github.com/docker/distribution/context/context.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package context
-
-import (
- "sync"
-
- "github.com/docker/distribution/uuid"
- "golang.org/x/net/context"
-)
-
-// Context is a copy of Context from the golang.org/x/net/context package.
-type Context interface {
- context.Context
-}
-
-// instanceContext is a context that provides only an instance id. It is
-// provided as the main background context.
-type instanceContext struct {
- Context
- id string // id of context, logged as "instance.id"
- once sync.Once // once protects generation of the id
-}
-
-func (ic *instanceContext) Value(key interface{}) interface{} {
- if key == "instance.id" {
- ic.once.Do(func() {
- // We want to lazy initialize the UUID such that we don't
- // call a random generator from the package initialization
- // code. For various reasons random could not be available
- // https://github.com/docker/distribution/issues/782
- ic.id = uuid.Generate().String()
- })
- return ic.id
- }
-
- return ic.Context.Value(key)
-}
-
-var background = &instanceContext{
- Context: context.Background(),
-}
-
-// Background returns a non-nil, empty Context. The background context
-// provides a single key, "instance.id" that is globally unique to the
-// process.
-func Background() Context {
- return background
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val. Use context Values only for request-scoped data that transits processes
-// and APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key, val interface{}) Context {
- return context.WithValue(parent, key, val)
-}
-
-// stringMapContext is a simple context implementation that checks a map for a
-// key, falling back to a parent if not present.
-type stringMapContext struct {
- context.Context
- m map[string]interface{}
-}
-
-// WithValues returns a context that proxies lookups through a map. Only
-// supports string keys.
-func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
- mo := make(map[string]interface{}, len(m)) // make our own copy.
- for k, v := range m {
- mo[k] = v
- }
-
- return stringMapContext{
- Context: ctx,
- m: mo,
- }
-}
-
-func (smc stringMapContext) Value(key interface{}) interface{} {
- if ks, ok := key.(string); ok {
- if v, ok := smc.m[ks]; ok {
- return v
- }
- }
-
- return smc.Context.Value(key)
-}
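For readers still on the removed package, a short sketch of how `Background`, `WithValues`, and the map-copying behavior fit together (the `dcontext` import alias is illustrative):

```go
package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	// WithValues copies the map, so later mutations do not leak into the context.
	m := map[string]interface{}{"version": "v2.0.0", "region": "us-east-1"}
	ctx := dcontext.WithValues(dcontext.Background(), m)
	m["region"] = "mutated" // no effect on ctx

	fmt.Println(ctx.Value("version"))            // v2.0.0
	fmt.Println(ctx.Value("region"))             // us-east-1
	fmt.Println(ctx.Value("instance.id") != nil) // true: lazily generated by Background()
}
```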
diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go
deleted file mode 100644
index 9b623074e..000000000
--- a/vendor/github.com/docker/distribution/context/doc.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Package context provides several utilities for working with
-// golang.org/x/net/context in http requests. Primarily, the focus is on
-// logging relevant request information but this package is not limited to
-// that purpose.
-//
-// The easiest way to get started is to get the background context:
-//
-// ctx := context.Background()
-//
-// The returned context should be passed around your application and be the
-// root of all other context instances. If the application has a version, this
-// line should be called before anything else:
-//
-// ctx := context.WithVersion(context.Background(), version)
-//
-// The above will store the version in the context and will be available to
-// the logger.
-//
-// Logging
-//
-// The most useful aspect of this package is GetLogger. This function takes
-// any context.Context interface and returns the current logger from the
-// context. Canonical usage looks like this:
-//
-// GetLogger(ctx).Infof("something interesting happened")
-//
-// GetLogger also takes optional key arguments. The keys will be looked up in
-// the context and reported with the logger. The following example would
-// return a logger that prints the version with each log message:
-//
-// ctx := context.WithValue(context.Background(), "version", version)
-// GetLogger(ctx, "version").Infof("this log message has a version field")
-//
-// The above would print out a log message like this:
-//
-// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
-//
-// When used with WithLogger, we gain the ability to decorate the context with
-// loggers that have information from disparate parts of the call stack.
-// Following from the version example, we can build a new context with the
-// configured logger such that we always print the version field:
-//
-// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
-//
-// Since the logger has been pushed to the context, we can now get the version
-// field for free with our log messages. Future calls to GetLogger on the new
-// context will have the version field:
-//
-// GetLogger(ctx).Infof("this log message has a version field")
-//
-// This becomes more powerful when we start stacking loggers. Let's say we
-// have the version logger from above but also want a request id. Using the
-// context above, in our request scoped function, we place another logger in
-// the context:
-//
-// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
-// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
-//
-// When GetLogger is called on the new context, "http.request.id" will be
-// included as a logger field, along with the original "version" field:
-//
-// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
-//
-// Note that this only affects the new context, the previous context, with the
-// version field, can be used independently. Put another way, the new logger,
-// added to the request context, is unique to that context and can have
-// request scoped variables.
-//
-// HTTP Requests
-//
-// This package also contains several methods for working with http requests.
-// The concepts are very similar to those described above. We simply place the
-// request in the context using WithRequest. This makes the request variables
-// available. GetRequestLogger can then be called to get request specific
-// variables in a log line:
-//
-// ctx = WithRequest(ctx, req)
-// GetRequestLogger(ctx).Infof("request variables")
-//
-// Like above, if we want to include the request data in all log messages in
-// the context, we push the logger to a new context and use that one:
-//
-// ctx = WithLogger(ctx, GetRequestLogger(ctx))
-//
-// The concept is fairly powerful and ensures that calls throughout the stack
-// can be traced in log messages. Using the fields like "http.request.id", one
-// can analyze call flow for a particular request with a simple grep of the
-// logs.
-package context
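The stacking pattern this package documentation describes condenses into one HTTP handler; a sketch using only APIs defined in this package (`WithRequest`, `GetRequestLogger`, `WithLogger`, `GetLogger`):

```go
package main

import (
	"net/http"

	dcontext "github.com/docker/distribution/context"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Bind the request (and its generated http.request.id) to the context,
	// then push a logger so the fields ride along on every later call.
	ctx := dcontext.WithRequest(dcontext.Background(), r)
	ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx))

	dcontext.GetLogger(ctx).Infof("handling %s", r.URL.Path)
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```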
diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go
deleted file mode 100644
index 7d65c8524..000000000
--- a/vendor/github.com/docker/distribution/context/http.go
+++ /dev/null
@@ -1,366 +0,0 @@
-package context
-
-import (
- "errors"
- "net"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "github.com/docker/distribution/uuid"
- "github.com/gorilla/mux"
- log "github.com/sirupsen/logrus"
-)
-
-// Common errors used with this package.
-var (
- ErrNoRequestContext = errors.New("no http request in context")
- ErrNoResponseWriterContext = errors.New("no http response in context")
-)
-
-func parseIP(ipStr string) net.IP {
- ip := net.ParseIP(ipStr)
- if ip == nil {
- log.Warnf("invalid remote IP address: %q", ipStr)
- }
- return ip
-}
-
-// RemoteAddr extracts the remote address of the request, taking into
-// account proxy headers.
-func RemoteAddr(r *http.Request) string {
- if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
- proxies := strings.Split(prior, ",")
- if len(proxies) > 0 {
- remoteAddr := strings.Trim(proxies[0], " ")
- if parseIP(remoteAddr) != nil {
- return remoteAddr
- }
- }
- }
- // X-Real-Ip is less supported, but worth checking in the
- // absence of X-Forwarded-For
- if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
- if parseIP(realIP) != nil {
- return realIP
- }
- }
-
- return r.RemoteAddr
-}
-
-// RemoteIP extracts the remote IP of the request, taking into
-// account proxy headers.
-func RemoteIP(r *http.Request) string {
- addr := RemoteAddr(r)
-
- // Try parsing it as "IP:port"
- if ip, _, err := net.SplitHostPort(addr); err == nil {
- return ip
- }
-
- return addr
-}
-
-// WithRequest places the request on the context. The context of the request
-// is assigned a unique id, available at "http.request.id". The request itself
-// is available at "http.request". Other common attributes are available under
-// the prefix "http.request.". If a request is already present on the context,
-// this method will panic.
-func WithRequest(ctx Context, r *http.Request) Context {
- if ctx.Value("http.request") != nil {
- // NOTE(stevvooe): This needs to be considered a programming error. It
- // is unlikely that we'd want to have more than one request in
- // context.
- panic("only one request per context")
- }
-
- return &httpRequestContext{
- Context: ctx,
- startedAt: time.Now(),
- id: uuid.Generate().String(),
- r: r,
- }
-}
-
-// GetRequest returns the http request in the given context. Returns
-// ErrNoRequestContext if the context does not have an http request associated
-// with it.
-func GetRequest(ctx Context) (*http.Request, error) {
- if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
- return r, nil
- }
- return nil, ErrNoRequestContext
-}
-
-// GetRequestID attempts to resolve the current request id, if possible. The
-// empty string is returned if it is not available on the context.
-func GetRequestID(ctx Context) string {
- return GetStringValue(ctx, "http.request.id")
-}
-
-// WithResponseWriter returns a new context and response writer that makes
-// interesting response statistics available within the context.
-func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
- if closeNotifier, ok := w.(http.CloseNotifier); ok {
- irwCN := &instrumentedResponseWriterCN{
- instrumentedResponseWriter: instrumentedResponseWriter{
- ResponseWriter: w,
- Context: ctx,
- },
- CloseNotifier: closeNotifier,
- }
-
- return irwCN, irwCN
- }
-
- irw := instrumentedResponseWriter{
- ResponseWriter: w,
- Context: ctx,
- }
- return &irw, &irw
-}
-
-// GetResponseWriter returns the http.ResponseWriter from the provided
-// context. If not present, ErrNoResponseWriterContext is returned. The
-// returned instance provides instrumentation in the context.
-func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
- v := ctx.Value("http.response")
-
- rw, ok := v.(http.ResponseWriter)
- if !ok || rw == nil {
- return nil, ErrNoResponseWriterContext
- }
-
- return rw, nil
-}
-
-// getVarsFromRequest lets us change the request vars implementation for testing
-// and maybe future changes.
-var getVarsFromRequest = mux.Vars
-
-// WithVars extracts gorilla/mux vars and makes them available on the returned
-// context. Variables are available at keys with the prefix "vars.". For
-// example, if looking for the variable "name", it can be accessed as
-// "vars.name". Implementations that are accessing values need not know that
-// the underlying context is implemented with gorilla/mux vars.
-func WithVars(ctx Context, r *http.Request) Context {
- return &muxVarsContext{
- Context: ctx,
- vars: getVarsFromRequest(r),
- }
-}
-
-// GetRequestLogger returns a logger that contains fields from the request in
-// the current context. If the request is not available in the context, no
-// fields will display. Request loggers can safely be pushed onto the context.
-func GetRequestLogger(ctx Context) Logger {
- return GetLogger(ctx,
- "http.request.id",
- "http.request.method",
- "http.request.host",
- "http.request.uri",
- "http.request.referer",
- "http.request.useragent",
- "http.request.remoteaddr",
- "http.request.contenttype")
-}
-
-// GetResponseLogger reads the current response stats and builds a logger.
-// Because the values are read at call time, pushing a logger returned from
-// this function on the context will lead to missing or invalid data. Only
-// call this at the end of a request, after the response has been written.
-func GetResponseLogger(ctx Context) Logger {
- l := getLogrusLogger(ctx,
- "http.response.written",
- "http.response.status",
- "http.response.contenttype")
-
- duration := Since(ctx, "http.request.startedat")
-
- if duration > 0 {
- l = l.WithField("http.response.duration", duration.String())
- }
-
- return l
-}
-
-// httpRequestContext makes information about a request available to context.
-type httpRequestContext struct {
- Context
-
- startedAt time.Time
- id string
- r *http.Request
-}
-
-// Value returns a keyed element of the request for use in the context. To get
-// the request itself, query "http.request". For other components, access them
-// as "http.request.<component>"; for example, "http.request.uri" maps to r.RequestURI.
-func (ctx *httpRequestContext) Value(key interface{}) interface{} {
- if keyStr, ok := key.(string); ok {
- if keyStr == "http.request" {
- return ctx.r
- }
-
- if !strings.HasPrefix(keyStr, "http.request.") {
- goto fallback
- }
-
- parts := strings.Split(keyStr, ".")
-
- if len(parts) != 3 {
- goto fallback
- }
-
- switch parts[2] {
- case "uri":
- return ctx.r.RequestURI
- case "remoteaddr":
- return RemoteAddr(ctx.r)
- case "method":
- return ctx.r.Method
- case "host":
- return ctx.r.Host
- case "referer":
- referer := ctx.r.Referer()
- if referer != "" {
- return referer
- }
- case "useragent":
- return ctx.r.UserAgent()
- case "id":
- return ctx.id
- case "startedat":
- return ctx.startedAt
- case "contenttype":
- ct := ctx.r.Header.Get("Content-Type")
- if ct != "" {
- return ct
- }
- }
- }
-
-fallback:
- return ctx.Context.Value(key)
-}
-
-type muxVarsContext struct {
- Context
- vars map[string]string
-}
-
-func (ctx *muxVarsContext) Value(key interface{}) interface{} {
- if keyStr, ok := key.(string); ok {
- if keyStr == "vars" {
- return ctx.vars
- }
-
- if strings.HasPrefix(keyStr, "vars.") {
- keyStr = strings.TrimPrefix(keyStr, "vars.")
- }
-
- if v, ok := ctx.vars[keyStr]; ok {
- return v
- }
- }
-
- return ctx.Context.Value(key)
-}
-
-// instrumentedResponseWriterCN provides response writer information in a
-// context. It implements http.CloseNotifier so that users can detect
-// early disconnects.
-type instrumentedResponseWriterCN struct {
- instrumentedResponseWriter
- http.CloseNotifier
-}
-
-// instrumentedResponseWriter provides response writer information in a
-// context. This variant is only used in the case where CloseNotifier is not
-// implemented by the parent ResponseWriter.
-type instrumentedResponseWriter struct {
- http.ResponseWriter
- Context
-
- mu sync.Mutex
- status int
- written int64
-}
-
-func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
- n, err = irw.ResponseWriter.Write(p)
-
- irw.mu.Lock()
- irw.written += int64(n)
-
- // Guess the likely status if not set.
- if irw.status == 0 {
- irw.status = http.StatusOK
- }
-
- irw.mu.Unlock()
-
- return
-}
-
-func (irw *instrumentedResponseWriter) WriteHeader(status int) {
- irw.ResponseWriter.WriteHeader(status)
-
- irw.mu.Lock()
- irw.status = status
- irw.mu.Unlock()
-}
-
-func (irw *instrumentedResponseWriter) Flush() {
- if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
- flusher.Flush()
- }
-}
-
-func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
- if keyStr, ok := key.(string); ok {
- if keyStr == "http.response" {
- return irw
- }
-
- if !strings.HasPrefix(keyStr, "http.response.") {
- goto fallback
- }
-
- parts := strings.Split(keyStr, ".")
-
- if len(parts) != 3 {
- goto fallback
- }
-
- irw.mu.Lock()
- defer irw.mu.Unlock()
-
- switch parts[2] {
- case "written":
- return irw.written
- case "status":
- return irw.status
- case "contenttype":
- contentType := irw.Header().Get("Content-Type")
- if contentType != "" {
- return contentType
- }
- }
- }
-
-fallback:
- return irw.Context.Value(key)
-}
-
-func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
- if keyStr, ok := key.(string); ok {
- if keyStr == "http.response" {
- return irw
- }
- }
-
- return irw.instrumentedResponseWriter.Value(key)
-}
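The proxy-header precedence in `RemoteAddr` and the port-stripping in `RemoteIP` are easiest to see with a fabricated request; a sketch using `net/http/httptest`:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	r := httptest.NewRequest("GET", "http://registry.example/v2/", nil)
	r.RemoteAddr = "10.0.0.9:34567"

	fmt.Println(dcontext.RemoteAddr(r)) // 10.0.0.9:34567 (no proxy headers set)

	// The first X-Forwarded-For entry wins, provided it parses as an IP.
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(dcontext.RemoteAddr(r)) // 203.0.113.7
	fmt.Println(dcontext.RemoteIP(r))   // 203.0.113.7 (already bare: no port to strip)
}
```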
diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go
deleted file mode 100644
index 86c5964e4..000000000
--- a/vendor/github.com/docker/distribution/context/logger.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package context
-
-import (
- "fmt"
-
- "github.com/sirupsen/logrus"
- "runtime"
-)
-
-// Logger provides a leveled-logging interface.
-type Logger interface {
- // standard logger methods
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fatalln(args ...interface{})
-
- Panic(args ...interface{})
- Panicf(format string, args ...interface{})
- Panicln(args ...interface{})
-
- // Leveled methods, from logrus
- Debug(args ...interface{})
- Debugf(format string, args ...interface{})
- Debugln(args ...interface{})
-
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- Errorln(args ...interface{})
-
- Info(args ...interface{})
- Infof(format string, args ...interface{})
- Infoln(args ...interface{})
-
- Warn(args ...interface{})
- Warnf(format string, args ...interface{})
- Warnln(args ...interface{})
-}
-
-// WithLogger creates a new context with provided logger.
-func WithLogger(ctx Context, logger Logger) Context {
- return WithValue(ctx, "logger", logger)
-}
-
-// GetLoggerWithField returns a logger instance with the specified field key
-// and value without affecting the context. Extra specified keys will be
-// resolved from the context.
-func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
- return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
-}
-
-// GetLoggerWithFields returns a logger instance with the specified fields
-// without affecting the context. Extra specified keys will be resolved from
-// the context.
-func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
- // must convert from interface{} -> interface{} to string -> interface{} for logrus.
- lfields := make(logrus.Fields, len(fields))
- for key, value := range fields {
- lfields[fmt.Sprint(key)] = value
- }
-
- return getLogrusLogger(ctx, keys...).WithFields(lfields)
-}
-
-// GetLogger returns the logger from the current context, if present. If one
-// or more keys are provided, they will be resolved on the context and
-// included in the logger. While context.Value takes an interface, any key
-// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
-// a logging key field. If context keys are integer constants, for example,
-// it's recommended that a String method is implemented.
-func GetLogger(ctx Context, keys ...interface{}) Logger {
- return getLogrusLogger(ctx, keys...)
-}
-
-// getLogrusLogger returns the logrus logger for the context. If one or more keys
-// are provided, they will be resolved on the context and included in the
-// logger. Only use this function if specific logrus functionality is
-// required.
-func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
- var logger *logrus.Entry
-
- // Get a logger, if it is present.
- loggerInterface := ctx.Value("logger")
- if loggerInterface != nil {
- if lgr, ok := loggerInterface.(*logrus.Entry); ok {
- logger = lgr
- }
- }
-
- if logger == nil {
- fields := logrus.Fields{}
-
- // Fill in the instance id, if we have it.
- instanceID := ctx.Value("instance.id")
- if instanceID != nil {
- fields["instance.id"] = instanceID
- }
-
- fields["go.version"] = runtime.Version()
- // If no logger is found, just return the standard logger.
- logger = logrus.StandardLogger().WithFields(fields)
- }
-
- fields := logrus.Fields{}
- for _, key := range keys {
- v := ctx.Value(key)
- if v != nil {
- fields[fmt.Sprint(key)] = v
- }
- }
-
- return logger.WithFields(fields)
-}
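How key resolution works in practice: keys handed to `GetLogger` are looked up on the context and become logrus fields, while `GetLoggerWithField` attaches a field without consulting the context. A sketch (output shape approximate):

```go
package main

import (
	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithValue(dcontext.Background(), "request.id", "abc123")

	// "request.id" is resolved on the context and attached as a field:
	// INFO[0000] processing go.version=go1.x instance.id=<uuid> request.id=abc123
	dcontext.GetLogger(ctx, "request.id").Infof("processing")

	// No context lookup here; the field is supplied directly.
	dcontext.GetLoggerWithField(ctx, "attempt", 2).Warnf("retrying")
}
```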
diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go
deleted file mode 100644
index 721964a84..000000000
--- a/vendor/github.com/docker/distribution/context/trace.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package context
-
-import (
- "runtime"
- "time"
-
- "github.com/docker/distribution/uuid"
-)
-
-// WithTrace allocates a traced timing span in a new context. This allows a
-// caller to track the time between calling WithTrace and the returned done
-// function. When the done function is called, a log message is emitted with a
-// "trace.duration" field, corresponding to the elapsed time and a
-// "trace.func" field, corresponding to the function that called WithTrace.
-//
-// The logging keys "trace.id" and "trace.parent.id" are provided to implement
-// dapper-like tracing. This function should be complemented with a WithSpan
-// method that could be used for tracing distributed RPC calls.
-//
-// The main benefit of this function is to post-process log messages or
-// intercept them in a hook to provide timing data. Trace ids and parent ids
-// can also be linked to provide call tracing, if so required.
-//
-// Here is an example of the usage:
-//
-// func timedOperation(ctx Context) {
-// ctx, done := WithTrace(ctx)
-// defer done("this will be the log message")
-// // ... function body ...
-// }
-//
-// If the function ran for roughly 1s, such a usage would emit a log message
-// as follows:
-//
-// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
-//
-// Notice that the function name is automatically resolved, along with the
-// package, and a trace id is emitted that can be linked with parent ids.
-func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
- if ctx == nil {
- ctx = Background()
- }
-
- pc, file, line, _ := runtime.Caller(1)
- f := runtime.FuncForPC(pc)
- ctx = &traced{
- Context: ctx,
- id: uuid.Generate().String(),
- start: time.Now(),
- parent: GetStringValue(ctx, "trace.id"),
- fnname: f.Name(),
- file: file,
- line: line,
- }
-
- return ctx, func(format string, a ...interface{}) {
- GetLogger(ctx,
- "trace.duration",
- "trace.id",
- "trace.parent.id",
- "trace.func",
- "trace.file",
- "trace.line").
- Debugf(format, a...)
- }
-}
-
-// traced represents a context that is traced for function call timing. It
-// also provides fast lookup for the various attributes that are available on
-// the trace.
-type traced struct {
- Context
- id string
- parent string
- start time.Time
- fnname string
- file string
- line int
-}
-
-func (ts *traced) Value(key interface{}) interface{} {
- switch key {
- case "trace.start":
- return ts.start
- case "trace.duration":
- return time.Since(ts.start)
- case "trace.id":
- return ts.id
- case "trace.parent.id":
- if ts.parent == "" {
- return nil // must return nil to signal no parent.
- }
-
- return ts.parent
- case "trace.func":
- return ts.fnname
- case "trace.file":
- return ts.file
- case "trace.line":
- return ts.line
- }
-
- return ts.Context.Value(key)
-}
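One detail the `WithTrace` documentation mentions but does not show: nesting spans links them through `trace.parent.id`. A sketch; note the `done` callback logs via `Debugf`, so spans are only visible at debug level:

```go
package main

import (
	dcontext "github.com/docker/distribution/context"
)

func inner(ctx dcontext.Context) {
	// This span's "trace.parent.id" resolves to outer's "trace.id".
	ctx, done := dcontext.WithTrace(ctx)
	defer done("inner finished")
	_ = ctx // ... real work here ...
}

func outer(ctx dcontext.Context) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("outer finished")
	inner(ctx)
}

func main() {
	outer(dcontext.Background())
}
```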
diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go
deleted file mode 100644
index cb9ef52e3..000000000
--- a/vendor/github.com/docker/distribution/context/util.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package context
-
-import (
- "time"
-)
-
-// Since looks up key, which should be a time.Time, and returns the duration
-// since that time. If the key is not found, the value returned will be zero.
-// This is helpful when inferring metrics related to context execution times.
-func Since(ctx Context, key interface{}) time.Duration {
- if startedAt, ok := ctx.Value(key).(time.Time); ok {
- return time.Since(startedAt)
- }
- return 0
-}
-
-// GetStringValue returns a string value from the context. The empty string
-// will be returned if not found.
-func GetStringValue(ctx Context, key interface{}) (value string) {
- if valuev, ok := ctx.Value(key).(string); ok {
- value = valuev
- }
- return value
-}
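Both helpers fail soft, which is the point: a missing or mistyped key yields a zero value rather than an error. A small sketch:

```go
package main

import (
	"fmt"
	"time"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithValue(dcontext.Background(), "startedat", time.Now())
	time.Sleep(10 * time.Millisecond)

	fmt.Println(dcontext.Since(ctx, "startedat") > 0)        // true
	fmt.Println(dcontext.Since(ctx, "missing"))              // 0s: absent key -> zero
	fmt.Printf("%q\n", dcontext.GetStringValue(ctx, "nope")) // "": unset -> empty string
}
```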
diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go
deleted file mode 100644
index 746cda02e..000000000
--- a/vendor/github.com/docker/distribution/context/version.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package context
-
-// WithVersion stores the application version in the context. The new context
-// gets a logger to ensure log messages are marked with the application
-// version.
-func WithVersion(ctx Context, version string) Context {
- ctx = WithValue(ctx, "version", version)
- // push a new logger onto the stack
- return WithLogger(ctx, GetLogger(ctx, "version"))
-}
-
-// GetVersion returns the application version from the context. An empty
-// string may be returned if the version was not set on the context.
-func GetVersion(ctx Context) string {
- return GetStringValue(ctx, "version")
-}
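Because `WithVersion` both stores the value and pushes a logger, every later `GetLogger` call carries the field; a sketch:

```go
package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithVersion(dcontext.Background(), "v2.0.0")

	fmt.Println(dcontext.GetVersion(ctx)) // v2.0.0

	// The logger pushed by WithVersion already includes the field:
	// INFO[0000] starting up ... version=v2.0.0
	dcontext.GetLogger(ctx).Infof("starting up")
}
```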
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
index 2c99f25d3..1816baea1 100644
--- a/vendor/github.com/docker/distribution/manifests.go
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -1,10 +1,10 @@
package distribution
import (
+ "context"
"fmt"
"mime"
- "github.com/docker/distribution/context"
"github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
index 1da1d533f..c34207d03 100644
--- a/vendor/github.com/docker/distribution/registry.go
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -1,7 +1,8 @@
package distribution
import (
- "github.com/docker/distribution/context"
+ "context"
+
"github.com/docker/distribution/reference"
)
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
index e3ffcb00f..695bf852f 100644
--- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -2,6 +2,7 @@ package client
import (
"bytes"
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -9,7 +10,6 @@ import (
"time"
"github.com/docker/distribution"
- "github.com/docker/distribution/context"
)
type httpBlobUpload struct {
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
index b82a968e2..8bd2c3fb6 100644
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -2,6 +2,7 @@ package client
import (
"bytes"
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -14,7 +15,6 @@ import (
"time"
"github.com/docker/distribution"
- "github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport"
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
index f647616bc..cdc34f5fe 100644
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -1,10 +1,10 @@
package cache
import (
- "github.com/docker/distribution/context"
- "github.com/opencontainers/go-digest"
+ "context"
"github.com/docker/distribution"
+ "github.com/opencontainers/go-digest"
)
// Metrics is used to hold metric counters
@@ -16,12 +16,20 @@ type Metrics struct {
Misses uint64
}
+// Logger can be provided on the MetricsTracker to log errors.
+//
+// Usually, this is just a proxy to dcontext.GetLogger.
+type Logger interface {
+ Errorf(format string, args ...interface{})
+}
+
// MetricsTracker represents a metric tracker
// which simply counts the number of hits and misses.
type MetricsTracker interface {
Hit()
Miss()
Metrics() Metrics
+ Logger(context.Context) Logger
}
type cachedBlobStatter struct {
@@ -53,7 +61,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
desc, err := cbds.cache.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
- context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
+ logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
}
goto fallback
@@ -73,7 +81,7 @@ fallback:
}
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
}
return desc, err
@@ -95,7 +103,19 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er
func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
}
return nil
}
+
+func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
+ if tracker == nil {
+ return
+ }
+
+ logger := tracker.Logger(ctx)
+ if logger == nil {
+ return
+ }
+ logger.Errorf(format, args...)
+}
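The new `Logger(context.Context)` method means existing `MetricsTracker` implementations must grow one method; note that `logErrorf` above tolerates both a nil tracker and a nil logger. A hypothetical conforming tracker, assuming the `Metrics` struct has `Hits`/`Misses` fields (only `Misses` is visible in this hunk):

```go
package cache // sketch: placed alongside cachedblobdescriptorstore.go

import (
	"context"
	"sync/atomic"

	"github.com/sirupsen/logrus"
)

// simpleTracker is a hypothetical MetricsTracker satisfying the new interface.
type simpleTracker struct {
	hits, misses uint64
}

func (t *simpleTracker) Hit()  { atomic.AddUint64(&t.hits, 1) }
func (t *simpleTracker) Miss() { atomic.AddUint64(&t.misses, 1) }

func (t *simpleTracker) Metrics() Metrics {
	return Metrics{
		Hits:   atomic.LoadUint64(&t.hits),
		Misses: atomic.LoadUint64(&t.misses),
	}
}

// Logger delegates to logrus; returning nil here would make logErrorf
// silently drop messages, which is also legal.
func (t *simpleTracker) Logger(ctx context.Context) Logger {
	return logrus.StandardLogger()
}
```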
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
index b2fcaf4e8..42d94d9bd 100644
--- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -1,10 +1,10 @@
package memory
import (
+ "context"
"sync"
"github.com/docker/distribution"
- "github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
"github.com/opencontainers/go-digest"
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
index 503056596..f22df2b85 100644
--- a/vendor/github.com/docker/distribution/tags.go
+++ b/vendor/github.com/docker/distribution/tags.go
@@ -1,7 +1,7 @@
package distribution
import (
- "github.com/docker/distribution/context"
+ "context"
)
// TagService provides access to information about tagged objects.
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
deleted file mode 100644
index d433ccaf5..000000000
--- a/vendor/github.com/docker/distribution/uuid/uuid.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Package uuid provides simple UUID generation. Only version 4 style UUIDs
-// can be generated.
-//
-// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
-package uuid
-
-import (
- "crypto/rand"
- "fmt"
- "io"
- "os"
- "syscall"
- "time"
-)
-
-const (
- // Bits is the number of bits in a UUID
- Bits = 128
-
- // Size is the number of bytes in a UUID
- Size = Bits / 8
-
- format = "%08x-%04x-%04x-%04x-%012x"
-)
-
-var (
- // ErrUUIDInvalid indicates a parsed string is not a valid uuid.
- ErrUUIDInvalid = fmt.Errorf("invalid uuid")
-
- // Loggerf can be used to override the default logging destination. Such
- // log messages in this library should be logged at warning or higher.
- Loggerf = func(format string, args ...interface{}) {}
-)
-
-// UUID represents a UUID value. UUIDs can be compared and set to other values
-// and accessed by byte.
-type UUID [Size]byte
-
-// Generate creates a new, version 4 uuid.
-func Generate() (u UUID) {
- const (
- // ensures we back off for less than 450ms total. Use the following to
- // select a new value, in units of 10ms:
- // n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
- maxretries = 9
- backoff = time.Millisecond * 10
- )
-
- var (
- totalBackoff time.Duration
- count int
- retries int
- )
-
- for {
- // This should never block but the read may fail. Because of this,
- // we just try to read the random number generator until we get
- // something. This is a very rare condition but may happen.
- b := time.Duration(retries) * backoff
- time.Sleep(b)
- totalBackoff += b
-
- n, err := io.ReadFull(rand.Reader, u[count:])
- if err != nil {
- if retryOnError(err) && retries < maxretries {
- count += n
- retries++
- Loggerf("error generating version 4 uuid, retrying: %v", err)
- continue
- }
-
- // Any other errors represent a system problem. What did someone
- // do to /dev/urandom?
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
- }
-
- break
- }
-
- u[6] = (u[6] & 0x0f) | 0x40 // set version byte
- u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
-
- return u
-}
-
-// Parse attempts to extract a uuid from the string or returns an error.
-func Parse(s string) (u UUID, err error) {
- if len(s) != 36 {
- return UUID{}, ErrUUIDInvalid
- }
-
- // create stack addresses for each section of the uuid.
- p := make([][]byte, 5)
-
- if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
- return u, err
- }
-
- copy(u[0:4], p[0])
- copy(u[4:6], p[1])
- copy(u[6:8], p[2])
- copy(u[8:10], p[3])
- copy(u[10:16], p[4])
-
- return
-}
-
-func (u UUID) String() string {
- return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
-}
-
-// retryOnError tries to detect whether or not retrying would be fruitful.
-func retryOnError(err error) bool {
- switch err := err.(type) {
- case *os.PathError:
- return retryOnError(err.Err) // unpack the target error
- case syscall.Errno:
- if err == syscall.EPERM {
- // EPERM represents an entropy pool exhaustion, a condition under
- // which we backoff and retry.
- return true
- }
- }
-
- return false
-}
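A round-trip sketch of the removed package: `Generate` stamps the version-4 nibble, `String` renders the canonical form, and `Parse` rejects anything that is not exactly 36 characters:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/uuid"
)

func main() {
	u := uuid.Generate()
	s := u.String() // e.g. f47ac10b-58cc-4372-a567-0e02b2c3d479

	fmt.Println(s[14] == '4') // true: version nibble set by Generate

	parsed, err := uuid.Parse(s)
	fmt.Println(err == nil && parsed == u) // true: round-trips exactly

	_, err = uuid.Parse("not-a-uuid")
	fmt.Println(err != nil) // true: length != 36 is rejected
}
```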
diff --git a/vendor/github.com/docker/docker/contrib/README.md b/vendor/github.com/docker/docker/contrib/README.md
new file mode 100644
index 000000000..92b1d9443
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/README.md
@@ -0,0 +1,4 @@
+The `contrib` directory contains scripts, images, and other helpful things
+which are not part of the core docker distribution. Please note that they
+could be out of date, since they do not receive the same attention as the
+rest of the repository.
diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
new file mode 100644
index 000000000..b767da7e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+int main(int argc, char *argv[])
+{
+ printf("EUID=%d\n", geteuid());
+ return 0;
+}
+
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
new file mode 100644
index 000000000..88ac28796
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
@@ -0,0 +1,16 @@
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+int main(int argc, char **argv)
+{
+ int err = acct("/tmp/t");
+ if (err == -1) {
+ fprintf(stderr, "acct failed: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
new file mode 100644
index 000000000..8bbb5c58b
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
@@ -0,0 +1,7 @@
+.globl _start
+.text
+_start:
+ xorl %eax, %eax
+ incl %eax
+ movb $0, %bl
+ int $0x80
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
new file mode 100644
index 000000000..624388630
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+ char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+ struct clone_args *args = (struct clone_args *)stuff;
+ if (execvp(args->argv[0], args->argv) != 0) {
+ fprintf(stderr, "failed to execvp arguments %s\n",
+ strerror(errno));
+ exit(-1);
+ }
+ // we should never reach here!
+ exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+ struct clone_args args;
+ args.argv = &argv[1];
+
+ int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
+
+ // allocate stack for child
+ char *stack; /* Start of stack buffer */
+ char *child_stack; /* End of stack buffer */
+ stack =
+ mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+ if (stack == MAP_FAILED) {
+ fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
+
+ // the result of this call is that our child_exec will be run in another
+ // process returning its pid
+ pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+ if (pid < 0) {
+ fprintf(stderr, "clone failed: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ // let's wait on our child process here before we, the parent, exit
+ if (waitpid(pid, NULL, 0) == -1) {
+ fprintf(stderr, "failed to wait pid %d\n", pid);
+ exit(EXIT_FAILURE);
+ }
+ exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
new file mode 100644
index 000000000..7995a0d3a
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
@@ -0,0 +1,14 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+
+int main() {
+ if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
+ perror("socket");
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
new file mode 100644
index 000000000..df9680c86
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
@@ -0,0 +1,11 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+
+int main() {
+ if (setgid(1) == -1) {
+ perror("setgid");
+ return 1;
+ }
+ return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
new file mode 100644
index 000000000..5b939677e
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
@@ -0,0 +1,11 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+
+int main() {
+ if (setuid(1) == -1) {
+ perror("setuid");
+ return 1;
+ }
+ return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
new file mode 100644
index 000000000..d26c82f00
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+int main() {
+ int s;
+ struct sockaddr_in sin;
+
+ s = socket(AF_INET, SOCK_STREAM, 0);
+ if (s == -1) {
+ perror("socket");
+ return 1;
+ }
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = INADDR_ANY;
+ sin.sin_port = htons(80);
+
+ if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+ perror("bind");
+ return 1;
+ }
+
+ close(s);
+
+ return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
new file mode 100644
index 000000000..4c5c8d304
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+ char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+ struct clone_args *args = (struct clone_args *)stuff;
+ if (execvp(args->argv[0], args->argv) != 0) {
+ fprintf(stderr, "failed to execvp arguments %s\n",
+ strerror(errno));
+ exit(-1);
+ }
+ // we should never reach here!
+ exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+ struct clone_args args;
+ args.argv = &argv[1];
+
+ int clone_flags = CLONE_NEWUSER | SIGCHLD;
+
+ // allocate stack for child
+ char *stack; /* Start of stack buffer */
+ char *child_stack; /* End of stack buffer */
+ stack =
+ mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+ if (stack == MAP_FAILED) {
+ fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
+
+ // the result of this call is that our child_exec will be run in another
+ // process returning its pid
+ pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+ if (pid < 0) {
+ fprintf(stderr, "clone failed: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ // let's wait on our child process here before we, the parent, exit
+ if (waitpid(pid, NULL, 0) == -1) {
+ fprintf(stderr, "failed to wait pid %d\n", pid);
+ exit(EXIT_FAILURE);
+ }
+ exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md
deleted file mode 100644
index 9e588db25..000000000
--- a/vendor/github.com/docker/docker/hack/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-## About
-
-This directory contains a collection of scripts used to build and manage this
-repository. If there are any issues regarding the intention of a particular
-script (or even part of a certain script), please reach out to us.
-It may help us either refine our current scripts, or add on new ones
-that are appropriate for a given use case.
-
-## DinD (dind.sh)
-
-DinD is a wrapper script which allows Docker to be run inside a Docker
-container. DinD requires the container to
-be run with privileged mode enabled.
-
-## Generate Authors (generate-authors.sh)
-
-Generates AUTHORS, a file listing the names and corresponding emails of
-individual contributors. AUTHORS can be found in the root directory of
-this repository.
-
-## Make
-
-There are two make files, each with a different extension. Neither is supposed
-to be called directly; invoke `make` instead. Both scripts run inside a Docker
-container.
-
-### make.ps1
-
-- The Windows native build script that uses PowerShell semantics; it is more
-limited than `hack\make.sh`, as it does not support the full set of operations
-provided by the Linux counterpart, `make.sh`. However, `make.ps1` does support
-local Windows development and Windows-to-Windows CI. More information can be
-found within `make.ps1` by the author, @jhowardmsft
-
-### make.sh
-
-- Referenced via `make test` when running tests on a local machine,
-or directly referenced when running tests inside a Docker development container.
-- When running on a local machine, use `make test` to run all tests found in
-`test`, `test-unit`, `test-integration`, and `test-docker-py` on
-your local machine. The default timeout is set in `make.sh` to 60 minutes
-(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
-all of the tests.
-- When running inside a Docker development container, `hack/make.sh` does
-not have a single target that runs all the tests. You need to provide a
-single command line with multiple targets that performs the same thing.
-An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
-- For more information related to testing outside the scope of this README,
-refer to
-[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
-
-## Release (release.sh)
-
-Releases any bundles built by `make` on a public AWS S3 bucket.
-For information regarding configuration, please view `release.sh`.
-
-## Vendor (vendor.sh)
-
-A shell script that is a wrapper around Vndr. For information on how to use
-this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
deleted file mode 100644
index 1cea52526..000000000
--- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Integration Testing on Swarm
-
-IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
-
-## Architecture
-
-### Master service
-
- - Works as a funker caller
- - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
-
-### Worker service
-
- - Works as a funker callee
- - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
-
-### Client
-
- - Controls master and workers via `docker stack`
- - No need to have a local daemon
-
-Typically, the master and workers are supposed to be running on a cloud environment,
-while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
-
-## Requirement
-
- - Docker daemon 1.13 or later
- - Private registry for distributed execution with multiple nodes
-
-## Usage
-
-### Step 1: Prepare images
-
- $ make build-integration-cli-on-swarm
-
-The following environment variables are known to work in this step:
-
- - `BUILDFLAGS`
- - `DOCKER_INCREMENTAL_BINARY`
-
-Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
-
-### Step 2: Execute tests
-
- $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
-
-The following environment variables are known to work in this step:
-
- - `DOCKER_GRAPHDRIVER`
- - `DOCKER_EXPERIMENTAL`
-
-#### Flags
-
-Basic flags:
-
- - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
- - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
-
-Experimental flags for mitigating makespan nonuniformity:
-
- - `-shuffle`: Shuffle the test filter strings
-
-Flags for debugging IT on Swarm itself:
-
- - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- - `-filters-file FILE`: the file that contains `-check.f` strings. By default, the file is automatically generated.
- - `-dry-run`: skip the actual workload
- - `-keep-executor`: do not auto-remove executor containers; useful for running privileged programs on Swarm
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
deleted file mode 100644
index efd6d6d04..000000000
--- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
-github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
deleted file mode 100644
index 7307d9694..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
deleted file mode 100644
index aa5563756..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive.go
+++ /dev/null
@@ -1,1237 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "bufio"
- "bytes"
- "compress/bzip2"
- "compress/gzip"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
- "syscall"
-
- "github.com/docker/docker/pkg/fileutils"
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/pools"
- "github.com/docker/docker/pkg/system"
- "github.com/sirupsen/logrus"
-)
-
-type (
- // Compression represents whether and how an archive is compressed.
- Compression int
- // WhiteoutFormat is the format of whiteouts to be unpacked
- WhiteoutFormat int
-
- // TarOptions wraps the tar options.
- TarOptions struct {
- IncludeFiles []string
- ExcludePatterns []string
- Compression Compression
- NoLchown bool
- UIDMaps []idtools.IDMap
- GIDMaps []idtools.IDMap
- ChownOpts *idtools.IDPair
- IncludeSourceDir bool
- // WhiteoutFormat is the expected on disk format for whiteout files.
- // This format will be converted to the standard format on pack
- // and from the standard format on unpack.
- WhiteoutFormat WhiteoutFormat
- // When unpacking, specifies whether overwriting a directory with a
- // non-directory is allowed and vice versa.
- NoOverwriteDirNonDir bool
- // For each include when creating an archive, the included name will be
- // replaced with the matching name from this map.
- RebaseNames map[string]string
- InUserNS bool
- }
-)
-
-// Archiver allows the reuse of most utility functions of this package with a
-// pluggable Untar function. To facilitate the passing of specific id mappings
-// for untar, an Archiver can be created with maps which will then be passed to
-// Untar operations.
-type Archiver struct {
- Untar func(io.Reader, string, *TarOptions) error
- IDMappingsVar *idtools.IDMappings
-}
-
-// NewDefaultArchiver returns a new Archiver without any IDMappings
-func NewDefaultArchiver() *Archiver {
- return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
-}
-
-// breakoutError is used to differentiate errors related to breaking out
-// When testing archive breakout in the unit tests, this error is expected
-// in order for the test to pass.
-type breakoutError error
-
-const (
- // Uncompressed represents an uncompressed archive.
- Uncompressed Compression = iota
- // Bzip2 is the bzip2 compression algorithm.
- Bzip2
- // Gzip is the gzip compression algorithm.
- Gzip
- // Xz is the xz compression algorithm.
- Xz
-)
-
-const (
- // AUFSWhiteoutFormat is the default format for whiteouts
- AUFSWhiteoutFormat WhiteoutFormat = iota
- // OverlayWhiteoutFormat formats whiteout according to the overlay
- // standard.
- OverlayWhiteoutFormat
-)
-
-const (
- modeISDIR = 040000 // Directory
- modeISFIFO = 010000 // FIFO
- modeISREG = 0100000 // Regular file
- modeISLNK = 0120000 // Symbolic link
- modeISBLK = 060000 // Block special file
- modeISCHR = 020000 // Character special file
- modeISSOCK = 0140000 // Socket
-)
-
-// IsArchivePath checks if the (possibly compressed) file at the given path
-// starts with a tar file header.
-func IsArchivePath(path string) bool {
- file, err := os.Open(path)
- if err != nil {
- return false
- }
- defer file.Close()
- rdr, err := DecompressStream(file)
- if err != nil {
- return false
- }
- r := tar.NewReader(rdr)
- _, err = r.Next()
- return err == nil
-}
-
-// DetectCompression detects the compression algorithm of the source.
-func DetectCompression(source []byte) Compression {
- for compression, m := range map[Compression][]byte{
- Bzip2: {0x42, 0x5A, 0x68},
- Gzip: {0x1F, 0x8B, 0x08},
- Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
- } {
- if len(source) < len(m) {
- logrus.Debug("Len too short")
- continue
- }
- if bytes.Equal(m, source[:len(m)]) {
- return compression
- }
- }
- return Uncompressed
-}
-
-func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
- args := []string{"xz", "-d", "-c", "-q"}
-
- return cmdStream(exec.Command(args[0], args[1:]...), archive)
-}
-
-// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
- p := pools.BufioReader32KPool
- buf := p.Get(archive)
- bs, err := buf.Peek(10)
- if err != nil && err != io.EOF {
- // Note: we'll ignore any io.EOF error because there are some odd
- // cases where the layer.tar file will be empty (zero bytes) and
- // that results in an io.EOF from the Peek() call. So, in those
- // cases we'll just treat it as a non-compressed stream and
- // that means just create an empty layer.
- // See Issue 18170
- return nil, err
- }
-
- compression := DetectCompression(bs)
- switch compression {
- case Uncompressed:
- readBufWrapper := p.NewReadCloserWrapper(buf, buf)
- return readBufWrapper, nil
- case Gzip:
- gzReader, err := gzip.NewReader(buf)
- if err != nil {
- return nil, err
- }
- readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
- return readBufWrapper, nil
- case Bzip2:
- bz2Reader := bzip2.NewReader(buf)
- readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
- return readBufWrapper, nil
- case Xz:
- xzReader, chdone, err := xzDecompress(buf)
- if err != nil {
- return nil, err
- }
- readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
- return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
- <-chdone
- return readBufWrapper.Close()
- }), nil
- default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- }
-}
-
-// CompressStream returns a WriteCloser that compresses data written to it into dest using the specified compression algorithm.
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
- p := pools.BufioWriter32KPool
- buf := p.Get(dest)
- switch compression {
- case Uncompressed:
- writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
- return writeBufWrapper, nil
- case Gzip:
- gzWriter := gzip.NewWriter(dest)
- writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
- return writeBufWrapper, nil
- case Bzip2, Xz:
- // archive/bzip2 does not support writing, and there is no xz support at all
- // However, this is not a problem as docker only currently generates gzipped tars
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- }
-}
-
-// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
-// modify the contents or header of an entry in the archive. If the file already
-// exists in the archive the TarModifierFunc will be called with the Header and
-// a reader which will return the files content. If the file does not exist both
-// header and content will be nil.
-type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
-
-// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
-// tar stream are modified if they match any of the keys in mods.
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
- pipeReader, pipeWriter := io.Pipe()
-
- go func() {
- tarReader := tar.NewReader(inputTarStream)
- tarWriter := tar.NewWriter(pipeWriter)
- defer inputTarStream.Close()
- defer tarWriter.Close()
-
- modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
- header, data, err := modifier(name, original, tarReader)
- switch {
- case err != nil:
- return err
- case header == nil:
- return nil
- }
-
- header.Name = name
- header.Size = int64(len(data))
- if err := tarWriter.WriteHeader(header); err != nil {
- return err
- }
- if len(data) != 0 {
- if _, err := tarWriter.Write(data); err != nil {
- return err
- }
- }
- return nil
- }
-
- var err error
- var originalHeader *tar.Header
- for {
- originalHeader, err = tarReader.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
-
- modifier, ok := mods[originalHeader.Name]
- if !ok {
- // No modifiers for this file, copy the header and data
- if err := tarWriter.WriteHeader(originalHeader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- if _, err := pools.Copy(tarWriter, tarReader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- continue
- }
- delete(mods, originalHeader.Name)
-
- if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- }
-
- // Apply the modifiers that haven't matched any files in the archive
- for name, modifier := range mods {
- if err := modify(name, nil, modifier, nil); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- }
-
- pipeWriter.Close()
-
- }()
- return pipeReader
-}
-
-// Extension returns the extension of a file that uses the specified compression algorithm.
-func (compression *Compression) Extension() string {
- switch *compression {
- case Uncompressed:
- return "tar"
- case Bzip2:
- return "tar.bz2"
- case Gzip:
- return "tar.gz"
- case Xz:
- return "tar.xz"
- }
- return ""
-}
-
-// FileInfoHeader creates a populated Header from fi.
-// Compared to the standard archive/tar package, this function fills in more
-// information. Also, regardless of Go version, it fills in the file type bits
-// (e.g. hdr.Mode |= modeISDIR), which were removed from archive/tar in Go 1.9.
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := tar.FileInfoHeader(fi, link)
- if err != nil {
- return nil, err
- }
- hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
- name, err = canonicalTarName(name, fi.IsDir())
- if err != nil {
- return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
- }
- hdr.Name = name
- if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
- return nil, err
- }
- return hdr, nil
-}
-
-// fillGo18FileTypeBits fills in the type bits which were removed from archive/tar in Go 1.9
-// https://github.com/golang/go/commit/66b5a2f
-func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
- fm := fi.Mode()
- switch {
- case fm.IsRegular():
- mode |= modeISREG
- case fi.IsDir():
- mode |= modeISDIR
- case fm&os.ModeSymlink != 0:
- mode |= modeISLNK
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- mode |= modeISCHR
- } else {
- mode |= modeISBLK
- }
- case fm&os.ModeNamedPipe != 0:
- mode |= modeISFIFO
- case fm&os.ModeSocket != 0:
- mode |= modeISSOCK
- }
- return mode
-}
-
-// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
-// to a tar header
-func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
- capability, _ := system.Lgetxattr(path, "security.capability")
- if capability != nil {
- hdr.Xattrs = make(map[string]string)
- hdr.Xattrs["security.capability"] = string(capability)
- }
- return nil
-}
-
-type tarWhiteoutConverter interface {
- ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
- ConvertRead(*tar.Header, string) (bool, error)
-}
-
-type tarAppender struct {
- TarWriter *tar.Writer
- Buffer *bufio.Writer
-
- // for hardlink mapping
- SeenFiles map[uint64]string
- IDMappings *idtools.IDMappings
- ChownOpts *idtools.IDPair
-
-	// For packing and unpacking whiteout files in a
-	// non-standard format. The whiteout files defined
-	// by the AUFS standard are used as the tar whiteout
-	// standard.
- WhiteoutConverter tarWhiteoutConverter
-}
-
-func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
- return &tarAppender{
- SeenFiles: make(map[uint64]string),
- TarWriter: tar.NewWriter(writer),
- Buffer: pools.BufioWriter32KPool.Get(nil),
- IDMappings: idMapping,
- ChownOpts: chownOpts,
- }
-}
-
-// canonicalTarName provides a platform-independent and consistent posix-style
-// path for files and directories to be archived regardless of the platform.
-func canonicalTarName(name string, isDir bool) (string, error) {
- name, err := CanonicalTarNameForPath(name)
- if err != nil {
- return "", err
- }
-
- // suffix with '/' for directories
- if isDir && !strings.HasSuffix(name, "/") {
- name += "/"
- }
- return name, nil
-}
-
-// addTarFile adds to the tar archive a file from `path` as `name`
-func (ta *tarAppender) addTarFile(path, name string) error {
- fi, err := os.Lstat(path)
- if err != nil {
- return err
- }
-
- var link string
- if fi.Mode()&os.ModeSymlink != 0 {
- var err error
- link, err = os.Readlink(path)
- if err != nil {
- return err
- }
- }
-
- hdr, err := FileInfoHeader(name, fi, link)
- if err != nil {
- return err
- }
- if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
- return err
- }
-
- // if it's not a directory and has more than 1 link,
- // it's hard linked, so set the type flag accordingly
- if !fi.IsDir() && hasHardlinks(fi) {
- inode, err := getInodeFromStat(fi.Sys())
- if err != nil {
- return err
- }
-		// a link should have a name that it links to
- // and that linked name should be first in the tar archive
- if oldpath, ok := ta.SeenFiles[inode]; ok {
- hdr.Typeflag = tar.TypeLink
- hdr.Linkname = oldpath
- hdr.Size = 0 // This Must be here for the writer math to add up!
- } else {
- ta.SeenFiles[inode] = name
- }
- }
-
-	// handle re-mapping container ID mappings back to host ID mappings before
-	// writing tar headers/files. We skip whiteout files because they were written
-	// by the kernel and already have proper ownership relative to the host
- if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
- fileIDPair, err := getFileUIDGID(fi.Sys())
- if err != nil {
- return err
- }
- hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
- if err != nil {
- return err
- }
- }
-
- // explicitly override with ChownOpts
- if ta.ChownOpts != nil {
- hdr.Uid = ta.ChownOpts.UID
- hdr.Gid = ta.ChownOpts.GID
- }
-
- if ta.WhiteoutConverter != nil {
- wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
- if err != nil {
- return err
- }
-
-		// If a new whiteout file exists, write the original hdr, then
-		// replace hdr with wo to be written after. Whiteouts should
-		// always be written after the original. Note that the original
-		// hdr may itself have been converted into a whiteout by the
-		// converter returning a whiteout header.
- if wo != nil {
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- return fmt.Errorf("tar: cannot use whiteout for non-empty file")
- }
- hdr = wo
- }
- }
-
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
-
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- // We use system.OpenSequential to ensure we use sequential file
- // access on Windows to avoid depleting the standby list.
- // On Linux, this equates to a regular os.Open.
- file, err := system.OpenSequential(path)
- if err != nil {
- return err
- }
-
- ta.Buffer.Reset(ta.TarWriter)
- defer ta.Buffer.Reset(nil)
- _, err = io.Copy(ta.Buffer, file)
- file.Close()
- if err != nil {
- return err
- }
- err = ta.Buffer.Flush()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
-	// hdr.Mode is in linux format, which we can use for syscalls,
- // but for os.Foo() calls we need the mode converted to os.FileMode,
- // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
- hdrInfo := hdr.FileInfo()
-
- switch hdr.Typeflag {
- case tar.TypeDir:
- // Create directory unless it exists as a directory already.
- // In that case we just want to merge the two
- if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
- if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
-
- case tar.TypeReg, tar.TypeRegA:
- // Source is regular file. We use system.OpenFileSequential to use sequential
- // file access to avoid depleting the standby list on Windows.
- // On Linux, this equates to a regular os.OpenFile
- file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
- if err != nil {
- return err
- }
- if _, err := io.Copy(file, reader); err != nil {
- file.Close()
- return err
- }
- file.Close()
-
- case tar.TypeBlock, tar.TypeChar:
- if inUserns { // cannot create devices in a userns
- return nil
- }
-		// Handle this in an OS-specific way
- if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
- return err
- }
-
- case tar.TypeFifo:
-		// Handle this in an OS-specific way
- if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
- return err
- }
-
- case tar.TypeLink:
- targetPath := filepath.Join(extractDir, hdr.Linkname)
- // check for hardlink breakout
- if !strings.HasPrefix(targetPath, extractDir) {
- return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
- }
- if err := os.Link(targetPath, path); err != nil {
- return err
- }
-
- case tar.TypeSymlink:
- // path -> hdr.Linkname = targetPath
- // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
- targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
-
- // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
- // that symlink would first have to be created, which would be caught earlier, at this very check:
- if !strings.HasPrefix(targetPath, extractDir) {
- return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
- }
- if err := os.Symlink(hdr.Linkname, path); err != nil {
- return err
- }
-
- case tar.TypeXGlobalHeader:
- logrus.Debug("PAX Global Extended Headers found and ignored")
- return nil
-
- default:
- return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
- }
-
- // Lchown is not supported on Windows.
- if Lchown && runtime.GOOS != "windows" {
- if chownOpts == nil {
- chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
- }
- if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
- return err
- }
- }
-
- var errors []string
- for key, value := range hdr.Xattrs {
- if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
- if err == syscall.ENOTSUP {
- // We ignore errors here because not all graphdrivers support
- // xattrs *cough* old versions of AUFS *cough*. However only
- // ENOTSUP should be emitted in that case, otherwise we still
- // bail.
- errors = append(errors, err.Error())
- continue
- }
- return err
- }
-
- }
-
- if len(errors) > 0 {
- logrus.WithFields(logrus.Fields{
- "errors": errors,
- }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
- }
-
- // There is no LChmod, so ignore mode for symlink. Also, this
- // must happen after chown, as that can modify the file mode
- if err := handleLChmod(hdr, path, hdrInfo); err != nil {
- return err
- }
-
- aTime := hdr.AccessTime
- if aTime.Before(hdr.ModTime) {
- // Last access time should never be before last modified time.
- aTime = hdr.ModTime
- }
-
- // system.Chtimes doesn't support a NOFOLLOW flag atm
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
- return err
- }
- } else {
- ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
- if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
- return err
- }
- }
- return nil
-}
-
-// Tar creates an archive from the directory at `path`, and returns it as a
-// stream of bytes.
-func Tar(path string, compression Compression) (io.ReadCloser, error) {
- return TarWithOptions(path, &TarOptions{Compression: compression})
-}
-
-// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
-// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
-
- // Fix the source path to work with long path names. This is a no-op
- // on platforms other than Windows.
- srcPath = fixVolumePathPrefix(srcPath)
-
- pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
- if err != nil {
- return nil, err
- }
-
- pipeReader, pipeWriter := io.Pipe()
-
- compressWriter, err := CompressStream(pipeWriter, options.Compression)
- if err != nil {
- return nil, err
- }
-
- go func() {
- ta := newTarAppender(
- idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
- compressWriter,
- options.ChownOpts,
- )
- ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
-
- defer func() {
- // Make sure to check the error on Close.
- if err := ta.TarWriter.Close(); err != nil {
- logrus.Errorf("Can't close tar writer: %s", err)
- }
- if err := compressWriter.Close(); err != nil {
- logrus.Errorf("Can't close compress writer: %s", err)
- }
- if err := pipeWriter.Close(); err != nil {
- logrus.Errorf("Can't close pipe writer: %s", err)
- }
- }()
-
- // this buffer is needed for the duration of this piped stream
- defer pools.BufioWriter32KPool.Put(ta.Buffer)
-
- // In general we log errors here but ignore them because
- // during e.g. a diff operation the container can continue
- // mutating the filesystem and we can see transient errors
- // from this
-
- stat, err := os.Lstat(srcPath)
- if err != nil {
- return
- }
-
- if !stat.IsDir() {
- // We can't later join a non-dir with any includes because the
- // 'walk' will error if "file/." is stat-ed and "file" is not a
- // directory. So, we must split the source path and use the
- // basename as the include.
- if len(options.IncludeFiles) > 0 {
- logrus.Warn("Tar: Can't archive a file with includes")
- }
-
- dir, base := SplitPathDirEntry(srcPath)
- srcPath = dir
- options.IncludeFiles = []string{base}
- }
-
- if len(options.IncludeFiles) == 0 {
- options.IncludeFiles = []string{"."}
- }
-
- seen := make(map[string]bool)
-
- for _, include := range options.IncludeFiles {
- rebaseName := options.RebaseNames[include]
-
- walkRoot := getWalkRoot(srcPath, include)
- filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
- if err != nil {
- logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
- return nil
- }
-
- relFilePath, err := filepath.Rel(srcPath, filePath)
- if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
- // Error getting relative path OR we are looking
- // at the source directory path. Skip in both situations.
- return nil
- }
-
- if options.IncludeSourceDir && include == "." && relFilePath != "." {
- relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
- }
-
- skip := false
-
- // If "include" is an exact match for the current file
- // then even if there's an "excludePatterns" pattern that
- // matches it, don't skip it. IOW, assume an explicit 'include'
- // is asking for that file no matter what - which is true
- // for some files, like .dockerignore and Dockerfile (sometimes)
- if include != relFilePath {
- skip, err = pm.Matches(relFilePath)
- if err != nil {
- logrus.Errorf("Error matching %s: %v", relFilePath, err)
- return err
- }
- }
-
- if skip {
-				// If we want to skip this file and it's a directory
-				// then we should first check to see if there's an
-				// excludes pattern (e.g. !dir/file) that starts with this
-				// dir. If so then we can't skip this dir.
-
-				// It's not a dir, so we can just return/skip.
- if !f.IsDir() {
- return nil
- }
-
- // No exceptions (!...) in patterns so just skip dir
- if !pm.Exclusions() {
- return filepath.SkipDir
- }
-
- dirSlash := relFilePath + string(filepath.Separator)
-
- for _, pat := range pm.Patterns() {
- if !pat.Exclusion() {
- continue
- }
- if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
- // found a match - so can't skip this dir
- return nil
- }
- }
-
- // No matching exclusion dir so just skip dir
- return filepath.SkipDir
- }
-
- if seen[relFilePath] {
- return nil
- }
- seen[relFilePath] = true
-
- // Rename the base resource.
- if rebaseName != "" {
- var replacement string
- if rebaseName != string(filepath.Separator) {
- // Special case the root directory to replace with an
- // empty string instead so that we don't end up with
- // double slashes in the paths.
- replacement = rebaseName
- }
-
- relFilePath = strings.Replace(relFilePath, include, replacement, 1)
- }
-
- if err := ta.addTarFile(filePath, relFilePath); err != nil {
- logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
- // if pipe is broken, stop writing tar stream to it
- if err == io.ErrClosedPipe {
- return err
- }
- }
- return nil
- })
- }
- }()
-
- return pipeReader, nil
-}
-
-// Unpack unpacks the decompressedArchive to dest with options.
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
- tr := tar.NewReader(decompressedArchive)
- trBuf := pools.BufioReader32KPool.Get(nil)
- defer pools.BufioReader32KPool.Put(trBuf)
-
- var dirs []*tar.Header
- idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
- rootIDs := idMappings.RootPair()
- whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
-
- // Iterate through the files in the archive.
-loop:
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- // end of tar archive
- break
- }
- if err != nil {
- return err
- }
-
- // Normalize name, for safety and for a simple is-root check
-		// This keeps "../" as-is, but normalizes "/../" to "/". On Windows,
-		// this keeps "..\" as-is, but normalizes "\..\" to "\".
- hdr.Name = filepath.Clean(hdr.Name)
-
- for _, exclude := range options.ExcludePatterns {
- if strings.HasPrefix(hdr.Name, exclude) {
- continue loop
- }
- }
-
- // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
- // the filepath format for the OS on which the daemon is running. Hence
- // the check for a slash-suffix MUST be done in an OS-agnostic way.
- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
- // Not the root directory, ensure that the parent directory exists
- parent := filepath.Dir(hdr.Name)
- parentPath := filepath.Join(dest, parent)
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
- if err != nil {
- return err
- }
- }
- }
-
- path := filepath.Join(dest, hdr.Name)
- rel, err := filepath.Rel(dest, path)
- if err != nil {
- return err
- }
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
- }
-
-		// If the path exists, we almost always just want to remove and replace it.
- // The only exception is when it is a directory *and* the file from
- // the layer is also a directory. Then we want to merge them (i.e.
- // just apply the metadata from the layer).
- if fi, err := os.Lstat(path); err == nil {
- if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
- // If NoOverwriteDirNonDir is true then we cannot replace
- // an existing directory with a non-directory from the archive.
- return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
- }
-
- if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
- // If NoOverwriteDirNonDir is true then we cannot replace
- // an existing non-directory with a directory from the archive.
- return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
- }
-
- if fi.IsDir() && hdr.Name == "." {
- continue
- }
-
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
- if err := os.RemoveAll(path); err != nil {
- return err
- }
- }
- }
- trBuf.Reset(tr)
-
- if err := remapIDs(idMappings, hdr); err != nil {
- return err
- }
-
- if whiteoutConverter != nil {
- writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
- if err != nil {
- return err
- }
- if !writeFile {
- continue
- }
- }
-
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
- return err
- }
-
- // Directory mtimes must be handled at the end to avoid further
- // file creation in them to modify the directory mtime
- if hdr.Typeflag == tar.TypeDir {
- dirs = append(dirs, hdr)
- }
- }
-
- for _, hdr := range dirs {
- path := filepath.Join(dest, hdr.Name)
-
- if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
- return untarHandler(tarArchive, dest, options, true)
-}
-
-// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive must be an uncompressed stream.
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
- return untarHandler(tarArchive, dest, options, false)
-}
-
-// untarHandler handles the optional automatic decompression before unpacking
-func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
- if tarArchive == nil {
- return fmt.Errorf("Empty archive")
- }
- dest = filepath.Clean(dest)
- if options == nil {
- options = &TarOptions{}
- }
- if options.ExcludePatterns == nil {
- options.ExcludePatterns = []string{}
- }
-
- r := tarArchive
- if decompress {
- decompressedArchive, err := DecompressStream(tarArchive)
- if err != nil {
- return err
- }
- defer decompressedArchive.Close()
- r = decompressedArchive
- }
-
- return Unpack(r, dest, options)
-}
-
-// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
-// If either Tar or Untar fails, TarUntar aborts and returns the error.
-func (archiver *Archiver) TarUntar(src, dst string) error {
- logrus.Debugf("TarUntar(%s %s)", src, dst)
- archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
- if err != nil {
- return err
- }
- defer archive.Close()
- options := &TarOptions{
- UIDMaps: archiver.IDMappingsVar.UIDs(),
- GIDMaps: archiver.IDMappingsVar.GIDs(),
- }
- return archiver.Untar(archive, dst, options)
-}
-
-// UntarPath untars the tar archive at the source path src into the destination directory dst.
-func (archiver *Archiver) UntarPath(src, dst string) error {
- archive, err := os.Open(src)
- if err != nil {
- return err
- }
- defer archive.Close()
- options := &TarOptions{
- UIDMaps: archiver.IDMappingsVar.UIDs(),
- GIDMaps: archiver.IDMappingsVar.GIDs(),
- }
- return archiver.Untar(archive, dst, options)
-}
-
-// CopyWithTar creates a tar archive of filesystem path `src`, and
-// unpacks it at filesystem path `dst`.
-// The archive is streamed directly with fixed buffering and no
-// intermediary disk IO.
-func (archiver *Archiver) CopyWithTar(src, dst string) error {
- srcSt, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !srcSt.IsDir() {
- return archiver.CopyFileWithTar(src, dst)
- }
-
- // if this Archiver is set up with ID mapping we need to create
- // the new destination directory with the remapped root UID/GID pair
- // as owner
- rootIDs := archiver.IDMappingsVar.RootPair()
- // Create dst, copy src's content into it
- logrus.Debugf("Creating dest directory: %s", dst)
- if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
- return err
- }
- logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
- return archiver.TarUntar(src, dst)
-}
-
-// CopyFileWithTar emulates the behavior of the 'cp' command-line
-// for a single file. It copies a regular file from path `src` to
-// path `dst`, and preserves all its metadata.
-func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
- logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
- srcSt, err := os.Stat(src)
- if err != nil {
- return err
- }
-
- if srcSt.IsDir() {
- return fmt.Errorf("Can't copy a directory")
- }
-
- // Clean up the trailing slash. This must be done in an operating
- // system specific manner.
- if dst[len(dst)-1] == os.PathSeparator {
- dst = filepath.Join(dst, filepath.Base(src))
- }
- // Create the holding directory if necessary
- if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
- return err
- }
-
- r, w := io.Pipe()
- errC := make(chan error, 1)
-
- go func() {
- defer close(errC)
-
- errC <- func() error {
- defer w.Close()
-
- srcF, err := os.Open(src)
- if err != nil {
- return err
- }
- defer srcF.Close()
-
- hdr, err := tar.FileInfoHeader(srcSt, "")
- if err != nil {
- return err
- }
- hdr.Name = filepath.Base(dst)
- hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
-
- if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
- return err
- }
-
- tw := tar.NewWriter(w)
- defer tw.Close()
- if err := tw.WriteHeader(hdr); err != nil {
- return err
- }
- if _, err := io.Copy(tw, srcF); err != nil {
- return err
- }
- return nil
- }()
- }()
- defer func() {
- if er := <-errC; err == nil && er != nil {
- err = er
- }
- }()
-
- err = archiver.Untar(r, filepath.Dir(dst), nil)
- if err != nil {
- r.CloseWithError(err)
- }
- return err
-}
-
-// IDMappings returns the IDMappings of the archiver.
-func (archiver *Archiver) IDMappings() *idtools.IDMappings {
- return archiver.IDMappingsVar
-}
-
-func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
- ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
- hdr.Uid, hdr.Gid = ids.UID, ids.GID
- return err
-}
-
-// cmdStream executes a command, and returns its stdout as a stream.
-// If the command fails to run or doesn't complete successfully, an error
-// will be returned, including anything written on stderr.
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
- chdone := make(chan struct{})
- cmd.Stdin = input
- pipeR, pipeW := io.Pipe()
- cmd.Stdout = pipeW
- var errBuf bytes.Buffer
- cmd.Stderr = &errBuf
-
- // Run the command and return the pipe
- if err := cmd.Start(); err != nil {
- return nil, nil, err
- }
-
- // Copy stdout to the returned pipe
- go func() {
- if err := cmd.Wait(); err != nil {
- pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
- } else {
- pipeW.Close()
- }
- close(chdone)
- }()
-
- return pipeR, chdone, nil
-}
-
-// NewTempArchive reads the content of src into a temporary file, and returns the contents
-// of that file as an archive. The archive can only be read once - as soon as reading completes,
-// the file will be deleted.
-func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
- f, err := ioutil.TempFile(dir, "")
- if err != nil {
- return nil, err
- }
- if _, err := io.Copy(f, src); err != nil {
- return nil, err
- }
- if _, err := f.Seek(0, 0); err != nil {
- return nil, err
- }
- st, err := f.Stat()
- if err != nil {
- return nil, err
- }
- size := st.Size()
- return &TempArchive{File: f, Size: size}, nil
-}
-
-// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
-// the file will be deleted.
-type TempArchive struct {
- *os.File
- Size int64 // Pre-computed from Stat().Size() as a convenience
- read int64
- closed bool
-}
-
-// Close closes the underlying file if it's still open, or does a no-op
-// to allow callers to try to close the TempArchive multiple times safely.
-func (archive *TempArchive) Close() error {
- if archive.closed {
- return nil
- }
-
- archive.closed = true
-
- return archive.File.Close()
-}
-
-func (archive *TempArchive) Read(data []byte) (int, error) {
- n, err := archive.File.Read(data)
- archive.read += int64(n)
- if err != nil || archive.read == archive.Size {
- archive.Close()
- os.Remove(archive.File.Name())
- }
- return n, err
-}
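The helpers above compose into a simple round trip: Tar produces a (possibly compressed) stream, and Untar runs it through DecompressStream before unpacking, so the caller never has to name the codec on the read side. A minimal sketch of that round trip, assuming hypothetical /tmp/src and /tmp/dst directories:

    package main

    import (
    	"log"

    	"github.com/docker/docker/pkg/archive"
    )

    func main() {
    	// Pack /tmp/src into a gzip-compressed tar stream.
    	rdr, err := archive.Tar("/tmp/src", archive.Gzip)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rdr.Close()

    	// Untar sniffs the gzip magic bytes via DecompressStream,
    	// so no compression hint is needed here.
    	if err := archive.Untar(rdr, "/tmp/dst", nil); err != nil {
    		log.Fatal(err)
    	}
    }

Passing a nil *TarOptions is safe here: untarHandler substitutes an empty TarOptions before unpacking.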
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
deleted file mode 100644
index 6e950e93c..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/system"
- "golang.org/x/sys/unix"
-)
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- if format == OverlayWhiteoutFormat {
- return overlayWhiteoutConverter{}
- }
- return nil
-}
-
-type overlayWhiteoutConverter struct{}
-
-func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
- // convert whiteouts to AUFS format
- if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
- // we just rename the file and make it normal
- dir, filename := filepath.Split(hdr.Name)
- hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
- hdr.Mode = 0600
- hdr.Typeflag = tar.TypeReg
- hdr.Size = 0
- }
-
- if fi.Mode()&os.ModeDir != 0 {
- // convert opaque dirs to AUFS format by writing an empty file with the prefix
- opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
- if err != nil {
- return nil, err
- }
- if len(opaque) == 1 && opaque[0] == 'y' {
- if hdr.Xattrs != nil {
- delete(hdr.Xattrs, "trusted.overlay.opaque")
- }
-
- // create a header for the whiteout file
- // it should inherit some properties from the parent, but be a regular file
- wo = &tar.Header{
- Typeflag: tar.TypeReg,
- Mode: hdr.Mode & int64(os.ModePerm),
- Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
- Size: 0,
- Uid: hdr.Uid,
- Uname: hdr.Uname,
- Gid: hdr.Gid,
- Gname: hdr.Gname,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- }
- }
- }
-
- return
-}
-
-func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
- base := filepath.Base(path)
- dir := filepath.Dir(path)
-
- // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
- if base == WhiteoutOpaqueDir {
- err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
- // don't write the file itself
- return false, err
- }
-
- // if a file was deleted and we are using overlay, we need to create a character device
- if strings.HasPrefix(base, WhiteoutPrefix) {
- originalBase := base[len(WhiteoutPrefix):]
- originalPath := filepath.Join(dir, originalBase)
-
- if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
- return false, err
- }
- if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
- return false, err
- }
-
- // don't write the file itself
- return false, nil
- }
-
- return true, nil
-}
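The converter above translates between two whiteout conventions: overlayfs marks a deleted file with a 0:0 character device and an opaque directory with the trusted.overlay.opaque xattr, while the tar (AUFS) convention uses zero-size marker files with a ".wh." name prefix. A minimal sketch of the naming rule, with the prefix hard-coded to mirror this package's WhiteoutPrefix constant:

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    const whiteoutPrefix = ".wh." // mirrors archive.WhiteoutPrefix

    func main() {
    	// Deleting etc/passwd in a layer is recorded in the tar stream
    	// as a zero-size regular file named etc/.wh.passwd.
    	dir, base := filepath.Split("etc/passwd")
    	fmt.Println(filepath.Join(dir, whiteoutPrefix+base)) // etc/.wh.passwd
    }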
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
deleted file mode 100644
index 54acbf285..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !linux
-
-package archive
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
deleted file mode 100644
index 02e95adff..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
- "archive/tar"
- "errors"
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
- "golang.org/x/sys/unix"
-)
-
-// fixVolumePathPrefix does platform-specific processing to ensure that the
-// path being passed in is converted to a volume path format if it is not one already.
-func fixVolumePathPrefix(srcPath string) string {
- return srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific. On Linux, we
-// can't use filepath.Join(srcPath,include) because this will clean away
-// a trailing "." or "/" which may be important.
-func getWalkRoot(srcPath string, include string) string {
- return srcPath + string(filepath.Separator) + include
-}
-
-// CanonicalTarNameForPath converts the platform-specific relative path p to a
-// canonical posix-style path for tar archival.
-func CanonicalTarNameForPath(p string) (string, error) {
- return p, nil // already unix-style
-}
-
-// chmodTarEntry is used to adjust the file permissions used in the tar header
-// based on the platform where the archival is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- return perm // noop for unix as golang APIs provide perm bits correctly
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if ok {
-		// Currently Go does not fill in the major/minor device numbers
- if s.Mode&unix.S_IFBLK != 0 ||
- s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
- hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
- }
- }
-
- return
-}
-
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if ok {
- inode = s.Ino
- }
-
- return
-}
-
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if !ok {
- return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
- }
- return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if rsystem.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
- mode := uint32(hdr.Mode & 07777)
- switch hdr.Typeflag {
- case tar.TypeBlock:
- mode |= unix.S_IFBLK
- case tar.TypeChar:
- mode |= unix.S_IFCHR
- case tar.TypeFifo:
- mode |= unix.S_IFIFO
- }
-
- return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
- return nil
-}
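setHeaderForSpecialDevice above splits st.Rdev into the major/minor numbers that land in hdr.Devmajor and hdr.Devminor. A minimal Unix-only sketch of that split, assuming /dev/null exists:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var st unix.Stat_t
    	if err := unix.Stat("/dev/null", &st); err != nil {
    		log.Fatal(err)
    	}
    	// unix.Major/unix.Minor decompose Rdev the same way the
    	// archiver fills hdr.Devmajor and hdr.Devminor.
    	fmt.Printf("/dev/null is device %d:%d\n",
    		unix.Major(uint64(st.Rdev)), unix.Minor(uint64(st.Rdev)))
    }

On Linux this prints 1:3, the traditional device numbers for /dev/null.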
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
deleted file mode 100644
index a22410c03..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// +build windows
-
-package archive
-
-import (
- "archive/tar"
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/longpath"
-)
-
-// fixVolumePathPrefix does platform-specific processing to ensure that the
-// path being passed in is converted to a volume path format if it is not one already.
-func fixVolumePathPrefix(srcPath string) string {
- return longpath.AddPrefix(srcPath)
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific.
-func getWalkRoot(srcPath string, include string) string {
- return filepath.Join(srcPath, include)
-}
-
-// CanonicalTarNameForPath converts the platform-specific relative path p to a
-// canonical posix-style path for tar archival.
-func CanonicalTarNameForPath(p string) (string, error) {
-	// windows: convert windows-style relative paths with backslashes
-	// into forward slashes. Since windows does not allow '/' or '\'
-	// in file names, it is mostly safe to replace; however, we must
-	// check just in case.
- if strings.Contains(p, "/") {
- return "", fmt.Errorf("Windows path contains forward slash: %s", p)
- }
- return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
-}
-
-// chmodTarEntry is used to adjust the file permissions used in the tar header
-// based on the platform where the archival is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
- permPart := perm & os.ModePerm
- noPermPart := perm &^ os.ModePerm
- // Add the x bit: make everything +x from windows
- permPart |= 0111
- permPart &= 0755
-
- return noPermPart | permPart
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
- // do nothing. no notion of Rdev, Nlink in stat on Windows
- return
-}
-
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
- // do nothing. no notion of Inode in stat on Windows
- return
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- return nil
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
- return nil
-}
-
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
- // no notion of file ownership mapping yet on Windows
- return idtools.IDPair{0, 0}, nil
-}
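chmodTarEntry above normalizes Windows permissions for tar: keep the non-permission mode bits, force the execute bits on, and clamp group/other write off. A standalone sketch of just that arithmetic:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	perm := os.FileMode(0644)
    	permPart := perm & os.ModePerm    // permission bits only
    	noPermPart := perm &^ os.ModePerm // type bits (dir, symlink, ...)
    	permPart |= 0111                  // add the x bit everywhere
    	permPart &= 0755                  // drop group/other write
    	fmt.Printf("%04o\n", noPermPart|permPart) // prints 0755
    }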
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
deleted file mode 100644
index d78fe6ac6..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes.go
+++ /dev/null
@@ -1,441 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "syscall"
- "time"
-
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/pools"
- "github.com/docker/docker/pkg/system"
- "github.com/sirupsen/logrus"
-)
-
-// ChangeType represents the change type.
-type ChangeType int
-
-const (
- // ChangeModify represents the modify operation.
- ChangeModify = iota
- // ChangeAdd represents the add operation.
- ChangeAdd
- // ChangeDelete represents the delete operation.
- ChangeDelete
-)
-
-func (c ChangeType) String() string {
- switch c {
- case ChangeModify:
- return "C"
- case ChangeAdd:
- return "A"
- case ChangeDelete:
- return "D"
- }
- return ""
-}
-
-// Change represents a change; it wraps the change type and path.
-// It describes changes of the files in the path with respect to the
-// parent layers. The change can be a modify, add, or delete.
-// This is used for layer diffs.
-type Change struct {
- Path string
- Kind ChangeType
-}
-
-func (change *Change) String() string {
- return fmt.Sprintf("%s %s", change.Kind, change.Path)
-}
-
-// for sort.Sort
-type changesByPath []Change
-
-func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
-func (c changesByPath) Len() int { return len(c) }
-func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
-
-// GNU tar and the Go tar writer don't have sub-second mtime
-// precision, which is problematic when we apply changes via tar
-// files. We handle this by comparing for exact times, *or* the same
-// second count with either a or b having exactly 0 nanoseconds.
-func sameFsTime(a, b time.Time) bool {
- return a == b ||
- (a.Unix() == b.Unix() &&
- (a.Nanosecond() == 0 || b.Nanosecond() == 0))
-}
-
-func sameFsTimeSpec(a, b syscall.Timespec) bool {
- return a.Sec == b.Sec &&
- (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
-}
-
-// Changes walks the path rw and determines changes for the files in the path,
-// with respect to the parent layers
-func Changes(layers []string, rw string) ([]Change, error) {
- return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
-}
-
-func aufsMetadataSkip(path string) (skip bool, err error) {
- skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
- if err != nil {
- skip = true
- }
- return
-}
-
-func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
- f := filepath.Base(path)
-
- // If there is a whiteout, then the file was removed
- if strings.HasPrefix(f, WhiteoutPrefix) {
- originalFile := f[len(WhiteoutPrefix):]
- return filepath.Join(filepath.Dir(path), originalFile), nil
- }
-
- return "", nil
-}
-
-type skipChange func(string) (bool, error)
-type deleteChange func(string, string, os.FileInfo) (string, error)
-
-func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
- var (
- changes []Change
- changedDirs = make(map[string]struct{})
- )
-
- err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Rebase path
- path, err = filepath.Rel(rw, path)
- if err != nil {
- return err
- }
-
- // As this runs on the daemon side, file paths are OS specific.
- path = filepath.Join(string(os.PathSeparator), path)
-
- // Skip root
- if path == string(os.PathSeparator) {
- return nil
- }
-
- if sc != nil {
- if skip, err := sc(path); skip {
- return err
- }
- }
-
- change := Change{
- Path: path,
- }
-
- deletedFile, err := dc(rw, path, f)
- if err != nil {
- return err
- }
-
- // Find out what kind of modification happened
- if deletedFile != "" {
- change.Path = deletedFile
- change.Kind = ChangeDelete
- } else {
- // Otherwise, the file was added
- change.Kind = ChangeAdd
-
- // ...Unless it already existed in a top layer, in which case, it's a modification
- for _, layer := range layers {
- stat, err := os.Stat(filepath.Join(layer, path))
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- if err == nil {
- // The file existed in the top layer, so that's a modification
-
- // However, if it's a directory, maybe it wasn't actually modified.
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
- if stat.IsDir() && f.IsDir() {
- if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
- // Both directories are the same, don't record the change
- return nil
- }
- }
- change.Kind = ChangeModify
- break
- }
- }
- }
-
- // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
- // This block is here to ensure the change is recorded even if the
- // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
- // Check https://github.com/docker/docker/pull/13590 for details.
- if f.IsDir() {
- changedDirs[path] = struct{}{}
- }
- if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
- parent := filepath.Dir(path)
- if _, ok := changedDirs[parent]; !ok && parent != "/" {
- changes = append(changes, Change{Path: parent, Kind: ChangeModify})
- changedDirs[parent] = struct{}{}
- }
- }
-
- // Record change
- changes = append(changes, change)
- return nil
- })
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- return changes, nil
-}
-
-// FileInfo describes the information of a file.
-type FileInfo struct {
- parent *FileInfo
- name string
- stat *system.StatT
- children map[string]*FileInfo
- capability []byte
- added bool
-}
-
-// LookUp looks up the FileInfo for the given path within this tree.
-func (info *FileInfo) LookUp(path string) *FileInfo {
- // As this runs on the daemon side, file paths are OS specific.
- parent := info
- if path == string(os.PathSeparator) {
- return info
- }
-
- pathElements := strings.Split(path, string(os.PathSeparator))
- for _, elem := range pathElements {
- if elem != "" {
- child := parent.children[elem]
- if child == nil {
- return nil
- }
- parent = child
- }
- }
- return parent
-}
-
-func (info *FileInfo) path() string {
- if info.parent == nil {
- // As this runs on the daemon side, file paths are OS specific.
- return string(os.PathSeparator)
- }
- return filepath.Join(info.parent.path(), info.name)
-}
-
-func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
-
- sizeAtEntry := len(*changes)
-
- if oldInfo == nil {
- // add
- change := Change{
- Path: info.path(),
- Kind: ChangeAdd,
- }
- *changes = append(*changes, change)
- info.added = true
- }
-
-	// We make a copy so we can modify it to detect additions.
-	// Also, we only recurse on the old dir if the new info is a directory;
-	// otherwise any previous delete/change is considered recursive.
- oldChildren := make(map[string]*FileInfo)
- if oldInfo != nil && info.isDir() {
- for k, v := range oldInfo.children {
- oldChildren[k] = v
- }
- }
-
- for name, newChild := range info.children {
- oldChild := oldChildren[name]
- if oldChild != nil {
- // change?
- oldStat := oldChild.stat
- newStat := newChild.stat
- // Note: We can't compare inode or ctime or blocksize here, because these change
- // when copying a file into a container. However, that is not generally a problem
- // because any content change will change mtime, and any status change should
- // be visible when actually comparing the stat fields. The only time this
- // breaks down is if some code intentionally hides a change by setting
- // back mtime
- if statDifferent(oldStat, newStat) ||
- !bytes.Equal(oldChild.capability, newChild.capability) {
- change := Change{
- Path: newChild.path(),
- Kind: ChangeModify,
- }
- *changes = append(*changes, change)
- newChild.added = true
- }
-
- // Remove from copy so we can detect deletions
- delete(oldChildren, name)
- }
-
- newChild.addChanges(oldChild, changes)
- }
- for _, oldChild := range oldChildren {
- // delete
- change := Change{
- Path: oldChild.path(),
- Kind: ChangeDelete,
- }
- *changes = append(*changes, change)
- }
-
- // If there were changes inside this directory, we need to add it, even if the directory
- // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
- // As this runs on the daemon side, file paths are OS specific.
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
- change := Change{
- Path: info.path(),
- Kind: ChangeModify,
- }
- // Let's insert the directory entry before the recently added entries located inside this dir
- *changes = append(*changes, change) // just to resize the slice, will be overwritten
- copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
- (*changes)[sizeAtEntry] = change
- }
-
-}
-
-// Changes computes the list of changes between this FileInfo tree and oldInfo.
-func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
- var changes []Change
-
- info.addChanges(oldInfo, &changes)
-
- return changes
-}
-
-func newRootFileInfo() *FileInfo {
- // As this runs on the daemon side, file paths are OS specific.
- root := &FileInfo{
- name: string(os.PathSeparator),
- children: make(map[string]*FileInfo),
- }
- return root
-}
-
-// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
-// If oldDir is "", then all files in newDir will be Add-Changes.
-func ChangesDirs(newDir, oldDir string) ([]Change, error) {
- var (
- oldRoot, newRoot *FileInfo
- )
- if oldDir == "" {
- emptyDir, err := ioutil.TempDir("", "empty")
- if err != nil {
- return nil, err
- }
- defer os.Remove(emptyDir)
- oldDir = emptyDir
- }
- oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
- if err != nil {
- return nil, err
- }
-
- return newRoot.Changes(oldRoot), nil
-}
-
-// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
-func ChangesSize(newDir string, changes []Change) int64 {
- var (
- size int64
- sf = make(map[uint64]struct{})
- )
- for _, change := range changes {
- if change.Kind == ChangeModify || change.Kind == ChangeAdd {
- file := filepath.Join(newDir, change.Path)
- fileInfo, err := os.Lstat(file)
- if err != nil {
- logrus.Errorf("Can not stat %q: %s", file, err)
- continue
- }
-
- if fileInfo != nil && !fileInfo.IsDir() {
- if hasHardlinks(fileInfo) {
- inode := getIno(fileInfo)
- if _, ok := sf[inode]; !ok {
- size += fileInfo.Size()
- sf[inode] = struct{}{}
- }
- } else {
- size += fileInfo.Size()
- }
- }
- }
- }
- return size
-}
-
-// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
- reader, writer := io.Pipe()
- go func() {
- ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
-
- // this buffer is needed for the duration of this piped stream
- defer pools.BufioWriter32KPool.Put(ta.Buffer)
-
- sort.Sort(changesByPath(changes))
-
- // In general we log errors here but ignore them because
- // during e.g. a diff operation the container can continue
- // mutating the filesystem and we can see transient errors
- // from this
- for _, change := range changes {
- if change.Kind == ChangeDelete {
- whiteOutDir := filepath.Dir(change.Path)
- whiteOutBase := filepath.Base(change.Path)
- whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
- timestamp := time.Now()
- hdr := &tar.Header{
- Name: whiteOut[1:],
- Size: 0,
- ModTime: timestamp,
- AccessTime: timestamp,
- ChangeTime: timestamp,
- }
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- logrus.Debugf("Can't write whiteout header: %s", err)
- }
- } else {
- path := filepath.Join(dir, change.Path)
- if err := ta.addTarFile(path, change.Path[1:]); err != nil {
- logrus.Debugf("Can't add file %s to tar: %s", path, err)
- }
- }
- }
-
- // Make sure to check the error on Close.
- if err := ta.TarWriter.Close(); err != nil {
- logrus.Debugf("Can't close layer: %s", err)
- }
- if err := writer.Close(); err != nil {
- logrus.Debugf("failed close Changes writer: %s", err)
- }
- }()
- return reader, nil
-}
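ChangesDirs and ExportChanges above form the layer-diff pipeline: compute the add/modify/delete set, then stream it as a tar in which deletions appear as ".wh." whiteout entries. A minimal sketch wiring them together, assuming hypothetical /tmp/old and /tmp/new snapshots and no UID/GID remapping:

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/docker/docker/pkg/archive"
    )

    func main() {
    	// Everything in /tmp/new that differs from /tmp/old.
    	changes, err := archive.ChangesDirs("/tmp/new", "/tmp/old")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Stream the diff as a layer tar; nil maps mean no ID remapping.
    	rdr, err := archive.ExportChanges("/tmp/new", changes, nil, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rdr.Close()

    	if _, err := io.Copy(os.Stdout, rdr); err != nil {
    		log.Fatal(err)
    	}
    }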
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
deleted file mode 100644
index e9eb478fe..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package archive
-
-import (
- "bytes"
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "syscall"
- "unsafe"
-
- "github.com/docker/docker/pkg/system"
- "golang.org/x/sys/unix"
-)
-
-// walker is used to implement collectFileInfoForChanges on linux. Where this
-// method in general returns the entire contents of two directory trees, we
-// optimize some FS calls out on linux. In particular, we take advantage of the
-// fact that getdents(2) returns the inode of each file in the directory being
-// walked, which, when walking two trees in parallel to generate a list of
-// changes, can be used to prune subtrees without ever having to lstat(2) them
-// directly. Eliminating stat calls in this way can save up to seconds on large
-// images.
-type walker struct {
- dir1 string
- dir2 string
- root1 *FileInfo
- root2 *FileInfo
-}
-
-// collectFileInfoForChanges returns a complete representation of the trees
-// rooted at dir1 and dir2, with one important exception: any subtree or
-// leaf where the inode and device numbers are an exact match between dir1
-// and dir2 will be pruned from the results. This method is *only* to be used
-// for generating a list of changes between the two directories, as it does not
-// reflect the full contents.
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
- w := &walker{
- dir1: dir1,
- dir2: dir2,
- root1: newRootFileInfo(),
- root2: newRootFileInfo(),
- }
-
- i1, err := os.Lstat(w.dir1)
- if err != nil {
- return nil, nil, err
- }
- i2, err := os.Lstat(w.dir2)
- if err != nil {
- return nil, nil, err
- }
-
- if err := w.walk("/", i1, i2); err != nil {
- return nil, nil, err
- }
-
- return w.root1, w.root2, nil
-}
-
-// Given a FileInfo, its path info, and a reference to the root of the tree
-// being constructed, register this file with the tree.
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
- if fi == nil {
- return nil
- }
- parent := root.LookUp(filepath.Dir(path))
- if parent == nil {
- return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
- }
- info := &FileInfo{
- name: filepath.Base(path),
- children: make(map[string]*FileInfo),
- parent: parent,
- }
- cpath := filepath.Join(dir, path)
- stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
- if err != nil {
- return err
- }
- info.stat = stat
- info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
- parent.children[info.name] = info
- return nil
-}
-
-// Walk a subtree rooted at the same path in both trees being iterated. For
-// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
-func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
- // Register these nodes with the return trees, unless we're still at the
- // (already-created) roots:
- if path != "/" {
- if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
- return err
- }
- if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
- return err
- }
- }
-
- is1Dir := i1 != nil && i1.IsDir()
- is2Dir := i2 != nil && i2.IsDir()
-
- sameDevice := false
- if i1 != nil && i2 != nil {
- si1 := i1.Sys().(*syscall.Stat_t)
- si2 := i2.Sys().(*syscall.Stat_t)
- if si1.Dev == si2.Dev {
- sameDevice = true
- }
- }
-
- // If these files are both non-existent, or leaves (non-dirs), we are done.
- if !is1Dir && !is2Dir {
- return nil
- }
-
- // Fetch the names of all the files contained in both directories being walked:
- var names1, names2 []nameIno
- if is1Dir {
- names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
- if err != nil {
- return err
- }
- }
- if is2Dir {
- names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
- if err != nil {
- return err
- }
- }
-
- // We have lists of the files contained in both parallel directories, sorted
- // in the same order. Walk them in parallel, generating a unique merged list
- // of all items present in either or both directories.
- var names []string
- ix1 := 0
- ix2 := 0
-
- for {
- if ix1 >= len(names1) {
- break
- }
- if ix2 >= len(names2) {
- break
- }
-
- ni1 := names1[ix1]
- ni2 := names2[ix2]
-
- switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
- case -1: // ni1 < ni2 -- advance ni1
- // we will not encounter ni1 in names2
- names = append(names, ni1.name)
- ix1++
- case 0: // ni1 == ni2
- if ni1.ino != ni2.ino || !sameDevice {
- names = append(names, ni1.name)
- }
- ix1++
- ix2++
- case 1: // ni1 > ni2 -- advance ni2
- // we will not encounter ni2 in names1
- names = append(names, ni2.name)
- ix2++
- }
- }
- for ix1 < len(names1) {
- names = append(names, names1[ix1].name)
- ix1++
- }
- for ix2 < len(names2) {
- names = append(names, names2[ix2].name)
- ix2++
- }
-
- // For each of the names present in either or both of the directories being
- // iterated, stat the name under each root, and recurse the pair of them:
- for _, name := range names {
- fname := filepath.Join(path, name)
- var cInfo1, cInfo2 os.FileInfo
- if is1Dir {
- cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- if is2Dir {
- cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- if err = w.walk(fname, cInfo1, cInfo2); err != nil {
- return err
- }
- }
- return nil
-}
-
-// {name,inode} pairs used to support the early-pruning logic of the walker type
-type nameIno struct {
- name string
- ino uint64
-}
-
-type nameInoSlice []nameIno
-
-func (s nameInoSlice) Len() int { return len(s) }
-func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
-
-// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
-// numbers further up the stack when reading directory contents. Unlike
-// os.Readdirnames, which returns a list of filenames, this function returns a
-// list of {filename,inode} pairs.
-func readdirnames(dirname string) (names []nameIno, err error) {
- var (
- size = 100
- buf = make([]byte, 4096)
- nbuf int
- bufp int
- nb int
- )
-
- f, err := os.Open(dirname)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- names = make([]nameIno, 0, size) // Empty with room to grow.
- for {
- // Refill the buffer if necessary
- if bufp >= nbuf {
- bufp = 0
- nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
- if nbuf < 0 {
- nbuf = 0
- }
- if err != nil {
- return nil, os.NewSyscallError("readdirent", err)
- }
- if nbuf <= 0 {
- break // EOF
- }
- }
-
- // Drain the buffer
- nb, names = parseDirent(buf[bufp:nbuf], names)
- bufp += nb
- }
-
- sl := nameInoSlice(names)
- sort.Sort(sl)
- return sl, nil
-}
-
-// parseDirent is a minor modification of unix.ParseDirent (linux version)
-// which returns {name,inode} pairs instead of just names.
-func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
- origlen := len(buf)
- for len(buf) > 0 {
- dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
- buf = buf[dirent.Reclen:]
- if dirent.Ino == 0 { // File absent in directory.
- continue
- }
- bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
- var name = string(bytes[0:clen(bytes[:])])
- if name == "." || name == ".." { // Useless names
- continue
- }
- names = append(names, nameIno{name, dirent.Ino})
- }
- return origlen - len(buf), names
-}
-
-func clen(n []byte) int {
- for i := 0; i < len(n); i++ {
- if n[i] == 0 {
- return i
- }
- }
- return len(n)
-}
-
-// OverlayChanges walks the path rw and determines changes for the files in the path,
-// with respect to the parent layers
-func OverlayChanges(layers []string, rw string) ([]Change, error) {
- return changes(layers, rw, overlayDeletedFile, nil)
-}
-
-func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
- if fi.Mode()&os.ModeCharDevice != 0 {
- s := fi.Sys().(*syscall.Stat_t)
- if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
- return path, nil
- }
- }
- if fi.Mode()&os.ModeDir != 0 {
- opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
- if err != nil {
- return "", err
- }
- if len(opaque) == 1 && opaque[0] == 'y' {
- return path, nil
- }
- }
-
- return "", nil
-
-}
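overlayDeletedFile encodes the two overlayfs deletion markers: a character device with device number 0:0 stands for a deleted file, and a directory carrying trusted.overlay.opaque=y is opaque. A standalone sketch of the character-device check, with a hypothetical path:

package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

// isOverlayWhiteout reports whether path looks like an overlayfs
// whiteout: a character device with device number 0:0.
func isOverlayWhiteout(path string) (bool, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return false, err
	}
	if fi.Mode()&os.ModeCharDevice == 0 {
		return false, nil
	}
	s := fi.Sys().(*syscall.Stat_t)
	return unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0, nil
}

func main() {
	// Hypothetical path inside an extracted overlay "diff" directory.
	ok, err := isOverlayWhiteout("/tmp/overlay-diff/deleted-file")
	fmt.Println(ok, err)
}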
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
deleted file mode 100644
index da70ed37c..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build !linux
-
-package archive
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/docker/docker/pkg/system"
-)
-
-func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
- var (
- oldRoot, newRoot *FileInfo
- err1, err2 error
- errs = make(chan error, 2)
- )
- go func() {
- oldRoot, err1 = collectFileInfo(oldDir)
- errs <- err1
- }()
- go func() {
- newRoot, err2 = collectFileInfo(newDir)
- errs <- err2
- }()
-
- // block until both routines have returned
- for i := 0; i < 2; i++ {
- if err := <-errs; err != nil {
- return nil, nil, err
- }
- }
-
- return oldRoot, newRoot, nil
-}
-
-func collectFileInfo(sourceDir string) (*FileInfo, error) {
- root := newRootFileInfo()
-
- err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Rebase path
- relPath, err := filepath.Rel(sourceDir, path)
- if err != nil {
- return err
- }
-
- // As this runs on the daemon side, file paths are OS specific.
- relPath = filepath.Join(string(os.PathSeparator), relPath)
-
- // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
- // Temporary workaround. If the returned path starts with two backslashes,
- // trim it down to a single backslash. Only relevant on Windows.
- if runtime.GOOS == "windows" {
- if strings.HasPrefix(relPath, `\\`) {
- relPath = relPath[1:]
- }
- }
-
- if relPath == string(os.PathSeparator) {
- return nil
- }
-
- parent := root.LookUp(filepath.Dir(relPath))
- if parent == nil {
- return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
- }
-
- info := &FileInfo{
- name: filepath.Base(relPath),
- children: make(map[string]*FileInfo),
- parent: parent,
- }
-
- s, err := system.Lstat(path)
- if err != nil {
- return err
- }
- info.stat = s
-
- info.capability, _ = system.Lgetxattr(path, "security.capability")
-
- parent.children[info.name] = info
-
- return nil
- })
- if err != nil {
- return nil, err
- }
- return root, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
deleted file mode 100644
index 7aa1226d7..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
- "os"
- "syscall"
-
- "github.com/docker/docker/pkg/system"
- "golang.org/x/sys/unix"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-	if oldStat.Mode() != newStat.Mode() ||
-		oldStat.UID() != newStat.UID() ||
-		oldStat.GID() != newStat.GID() ||
-		oldStat.Rdev() != newStat.Rdev() ||
-		// Don't look at size for dirs, it's not a good measure of change
- (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
- (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
- return true
- }
- return false
-}
-
-func (info *FileInfo) isDir() bool {
- return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
-}
-
-func getIno(fi os.FileInfo) uint64 {
- return fi.Sys().(*syscall.Stat_t).Ino
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
- return fi.Sys().(*syscall.Stat_t).Nlink > 1
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
deleted file mode 100644
index 6fd353269..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package archive
-
-import (
- "os"
-
- "github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-	// Don't look at size for dirs, it's not a good measure of change
- if oldStat.Mtim() != newStat.Mtim() ||
- oldStat.Mode() != newStat.Mode() ||
- oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
- return true
- }
- return false
-}
-
-func (info *FileInfo) isDir() bool {
- return info.parent == nil || info.stat.Mode().IsDir()
-}
-
-func getIno(fi os.FileInfo) (inode uint64) {
- return
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
- return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
deleted file mode 100644
index d1e036d5c..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy.go
+++ /dev/null
@@ -1,472 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "errors"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/system"
- "github.com/sirupsen/logrus"
-)
-
-// Errors used or returned by this file.
-var (
- ErrNotDirectory = errors.New("not a directory")
- ErrDirNotExists = errors.New("no such directory")
- ErrCannotCopyDir = errors.New("cannot copy directory")
- ErrInvalidCopySource = errors.New("invalid copy source content")
-)
-
-// PreserveTrailingDotOrSeparator returns the given cleaned path (after
-// processing using any utility functions from the path or filepath stdlib
-// packages) and appends a trailing `/.` or `/` if its corresponding original
-// path (from before being processed by utility functions from the path or
-// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
-// path already ends in a `.` path segment, then another is not added. If the
-// cleaned path already ends in the separator, then another is not added.
-func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
- // Ensure paths are in platform semantics
- cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
- originalPath = strings.Replace(originalPath, "/", string(sep), -1)
-
- if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
- if !hasTrailingPathSeparator(cleanedPath, sep) {
- // Add a separator if it doesn't already end with one (a cleaned
- // path would only end in a separator if it is the root).
- cleanedPath += string(sep)
- }
- cleanedPath += "."
- }
-
- if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
- cleanedPath += string(sep)
- }
-
- return cleanedPath
-}
-
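A few concrete cases make the contract clearer (illustrative paths, Unix separator):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// A trailing "/." on the original is re-applied to the cleaned path.
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.", '/')) // /foo/bar/.
	// A bare trailing separator is likewise preserved.
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/", '/')) // /foo/bar/
	// Nothing to preserve: the cleaned path is returned unchanged.
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar", '/')) // /foo/bar
}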
-// assertsDirectory returns whether the given path is
-// asserted to be a directory, i.e., the path ends with
-// a trailing '/' or `/.`, assuming a path separator of `/`.
-func assertsDirectory(path string, sep byte) bool {
- return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
-}
-
-// hasTrailingPathSeparator returns whether the given
-// path ends with the given path separator character.
-func hasTrailingPathSeparator(path string, sep byte) bool {
- return len(path) > 0 && path[len(path)-1] == sep
-}
-
-// specifiesCurrentDir returns whether the given path specifies
-// a "current directory", i.e., the last path segment is `.`.
-func specifiesCurrentDir(path string) bool {
- return filepath.Base(path) == "."
-}
-
-// SplitPathDirEntry splits the given path between its directory name and its
-// basename by first cleaning the path, preserving a trailing "." if the
-// original path specified the current directory.
-func SplitPathDirEntry(path string) (dir, base string) {
- cleanedPath := filepath.Clean(filepath.FromSlash(path))
-
- if specifiesCurrentDir(path) {
- cleanedPath += string(os.PathSeparator) + "."
- }
-
- return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
-}
-
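For example (illustrative paths):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	fmt.Println(archive.SplitPathDirEntry("/foo/bar"))   // "/foo" "bar"
	fmt.Println(archive.SplitPathDirEntry("/foo/bar/.")) // "/foo/bar" "."
}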
-// TarResource archives the resource described by the given CopyInfo to a Tar
-// archive. A non-nil error is returned if sourcePath does not exist or is
-// asserted to be a directory but exists as another type of file.
-//
-// This function acts as a convenient wrapper around TarWithOptions, which
-// requires a directory as the source path. TarResource accepts either a
-// directory or a file path and correctly sets the Tar options.
-func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
- return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
-}
-
-// TarResourceRebase is like TarResource but renames the first path element of
-// items in the resulting tar archive to match the given rebaseName if not "".
-func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
- sourcePath = normalizePath(sourcePath)
- if _, err = os.Lstat(sourcePath); err != nil {
- // Catches the case where the source does not exist or is not a
- // directory if asserted to be a directory, as this also causes an
- // error.
- return
- }
-
- // Separate the source path between its directory and
- // the entry in that directory which we are archiving.
- sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
- opts := TarResourceRebaseOpts(sourceBase, rebaseName)
-
- logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
- return TarWithOptions(sourceDir, opts)
-}
-
-// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
-// parameters to be sent to TarWithOptions (the TarOptions struct)
-func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
- filter := []string{sourceBase}
- return &TarOptions{
- Compression: Uncompressed,
- IncludeFiles: filter,
- IncludeSourceDir: true,
- RebaseNames: map[string]string{
- sourceBase: rebaseName,
- },
- }
-}
-
-// CopyInfo holds basic info about the source
-// or destination path of a copy operation.
-type CopyInfo struct {
- Path string
- Exists bool
- IsDir bool
- RebaseName string
-}
-
-// CopyInfoSourcePath stats the given path to create a CopyInfo
-// struct representing that resource for the source of an archive copy
-// operation. The given path should be an absolute local path. A source path
-// has all symlinks evaluated that appear before the last path separator ("/"
-// on Unix). As it is to be a copy source, the path must exist.
-func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
-	// Normalize the file path and then evaluate the symbolic link;
-	// we will use the target file instead of the symbolic link if
-	// followLink is set.
- path = normalizePath(path)
-
- resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
- if err != nil {
- return CopyInfo{}, err
- }
-
- stat, err := os.Lstat(resolvedPath)
- if err != nil {
- return CopyInfo{}, err
- }
-
- return CopyInfo{
- Path: resolvedPath,
- Exists: true,
- IsDir: stat.IsDir(),
- RebaseName: rebaseName,
- }, nil
-}
-
-// CopyInfoDestinationPath stats the given path to create a CopyInfo
-// struct representing that resource for the destination of an archive copy
-// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
- maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
- path = normalizePath(path)
- originalPath := path
-
- stat, err := os.Lstat(path)
-
- if err == nil && stat.Mode()&os.ModeSymlink == 0 {
- // The path exists and is not a symlink.
- return CopyInfo{
- Path: path,
- Exists: true,
- IsDir: stat.IsDir(),
- }, nil
- }
-
- // While the path is a symlink.
- for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
- if n > maxSymlinkIter {
- // Don't follow symlinks more than this arbitrary number of times.
- return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
- }
-
- // The path is a symbolic link. We need to evaluate it so that the
- // destination of the copy operation is the link target and not the
- // link itself. This is notably different than CopyInfoSourcePath which
- // only evaluates symlinks before the last appearing path separator.
- // Also note that it is okay if the last path element is a broken
- // symlink as the copy operation should create the target.
- var linkTarget string
-
- linkTarget, err = os.Readlink(path)
- if err != nil {
- return CopyInfo{}, err
- }
-
- if !system.IsAbs(linkTarget) {
- // Join with the parent directory.
- dstParent, _ := SplitPathDirEntry(path)
- linkTarget = filepath.Join(dstParent, linkTarget)
- }
-
- path = linkTarget
- stat, err = os.Lstat(path)
- }
-
- if err != nil {
- // It's okay if the destination path doesn't exist. We can still
- // continue the copy operation if the parent directory exists.
- if !os.IsNotExist(err) {
- return CopyInfo{}, err
- }
-
- // Ensure destination parent dir exists.
- dstParent, _ := SplitPathDirEntry(path)
-
- parentDirStat, err := os.Stat(dstParent)
- if err != nil {
- return CopyInfo{}, err
- }
- if !parentDirStat.IsDir() {
- return CopyInfo{}, ErrNotDirectory
- }
-
- return CopyInfo{Path: path}, nil
- }
-
- // The path exists after resolving symlinks.
- return CopyInfo{
- Path: path,
- Exists: true,
- IsDir: stat.IsDir(),
- }, nil
-}
-
-// PrepareArchiveCopy prepares the given srcContent archive, which should
-// contain the archived resource described by srcInfo, to the destination
-// described by dstInfo. Returns the possibly modified content archive along
-// with the path to the destination directory which it should be extracted to.
-func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
- // Ensure in platform semantics
- srcInfo.Path = normalizePath(srcInfo.Path)
- dstInfo.Path = normalizePath(dstInfo.Path)
-
- // Separate the destination path between its directory and base
- // components in case the source archive contents need to be rebased.
- dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
- _, srcBase := SplitPathDirEntry(srcInfo.Path)
-
- switch {
- case dstInfo.Exists && dstInfo.IsDir:
- // The destination exists as a directory. No alteration
- // to srcContent is needed as its contents can be
- // simply extracted to the destination directory.
- return dstInfo.Path, ioutil.NopCloser(srcContent), nil
- case dstInfo.Exists && srcInfo.IsDir:
- // The destination exists as some type of file and the source
- // content is a directory. This is an error condition since
- // you cannot copy a directory to an existing file location.
- return "", nil, ErrCannotCopyDir
- case dstInfo.Exists:
- // The destination exists as some type of file and the source content
- // is also a file. The source content entry will have to be renamed to
- // have a basename which matches the destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- case srcInfo.IsDir:
- // The destination does not exist and the source content is an archive
- // of a directory. The archive should be extracted to the parent of
- // the destination path instead, and when it is, the directory that is
- // created as a result should take the name of the destination path.
- // The source content entries will have to be renamed to have a
- // basename which matches the destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- case assertsDirectory(dstInfo.Path, os.PathSeparator):
- // The destination does not exist and is asserted to be created as a
- // directory, but the source content is not a directory. This is an
- // error condition since you cannot create a directory from a file
- // source.
- return "", nil, ErrDirNotExists
- default:
- // The last remaining case is when the destination does not exist, is
- // not asserted to be a directory, and the source content is not an
-		// archive of a directory. In this case, the destination file will need
- // to be created when the archive is extracted and the source content
- // entry will have to be renamed to have a basename which matches the
- // destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- }
-
-}
-
-// RebaseArchiveEntries rewrites the given srcContent archive replacing
-// an occurrence of oldBase with newBase at the beginning of entry names.
-func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
- if oldBase == string(os.PathSeparator) {
- // If oldBase specifies the root directory, use an empty string as
- // oldBase instead so that newBase doesn't replace the path separator
- // that all paths will start with.
- oldBase = ""
- }
-
- rebased, w := io.Pipe()
-
- go func() {
- srcTar := tar.NewReader(srcContent)
- rebasedTar := tar.NewWriter(w)
-
- for {
- hdr, err := srcTar.Next()
- if err == io.EOF {
- // Signals end of archive.
- rebasedTar.Close()
- w.Close()
- return
- }
- if err != nil {
- w.CloseWithError(err)
- return
- }
-
- hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
- if hdr.Typeflag == tar.TypeLink {
- hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
- }
-
- if err = rebasedTar.WriteHeader(hdr); err != nil {
- w.CloseWithError(err)
- return
- }
-
- if _, err = io.Copy(rebasedTar, srcTar); err != nil {
- w.CloseWithError(err)
- return
- }
- }
- }()
-
- return rebased
-}
-
-// TODO @gupta-ak. These might have to be changed in the future to be
-// continuity driver aware as well to support LCOW.
-
-// CopyResource performs an archive copy from the given source path to the
-// given destination path. The source path MUST exist and the destination
-// path's parent directory must exist.
-func CopyResource(srcPath, dstPath string, followLink bool) error {
- var (
- srcInfo CopyInfo
- err error
- )
-
- // Ensure in platform semantics
- srcPath = normalizePath(srcPath)
- dstPath = normalizePath(dstPath)
-
- // Clean the source and destination paths.
- srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
- dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
-
- if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
- return err
- }
-
- content, err := TarResource(srcInfo)
- if err != nil {
- return err
- }
- defer content.Close()
-
- return CopyTo(content, srcInfo, dstPath)
-}
-
-// CopyTo handles extracting the given content whose
-// entries should be sourced from srcInfo to dstPath.
-func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
- // The destination path need not exist, but CopyInfoDestinationPath will
- // ensure that at least the parent directory exists.
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
- if err != nil {
- return err
- }
-
- dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
- if err != nil {
- return err
- }
- defer copyArchive.Close()
-
- options := &TarOptions{
- NoLchown: true,
- NoOverwriteDirNonDir: true,
- }
-
- return Untar(copyArchive, dstDir, options)
-}
-
-// ResolveHostSourcePath decides which real path needs to be copied, based on parameters
-// such as whether to follow symbolic links. If followLink is true, resolvedPath is the
-// link target of any symbolic link file; otherwise only symlinks in the parent directory
-// are resolved, and the symbolic link file itself is returned unresolved.
-func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
- if followLink {
- resolvedPath, err = filepath.EvalSymlinks(path)
- if err != nil {
- return
- }
-
- resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
- } else {
- dirPath, basePath := filepath.Split(path)
-
- // if not follow symbol link, then resolve symbol link of parent dir
- var resolvedDirPath string
- resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
- if err != nil {
- return
- }
- // resolvedDirPath will have been cleaned (no trailing path separators) so
- // we can manually join it with the base path element.
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
- if hasTrailingPathSeparator(path, os.PathSeparator) &&
- filepath.Base(path) != filepath.Base(resolvedPath) {
- rebaseName = filepath.Base(path)
- }
- }
- return resolvedPath, rebaseName, nil
-}
-
-// GetRebaseName normalizes and compares path and resolvedPath,
-// returning the completed resolved path and the rebased file name.
-func GetRebaseName(path, resolvedPath string) (string, string) {
-	// resolvedPath will have been cleaned (no trailing path separator or "."),
-	// so we may need to re-append them manually.
- var rebaseName string
- if specifiesCurrentDir(path) &&
- !specifiesCurrentDir(resolvedPath) {
- resolvedPath += string(filepath.Separator) + "."
- }
-
- if hasTrailingPathSeparator(path, os.PathSeparator) &&
- !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
- resolvedPath += string(filepath.Separator)
- }
-
- if filepath.Base(path) != filepath.Base(resolvedPath) {
- // In the case where the path had a trailing separator and a symlink
- // evaluation has changed the last path component, we will need to
- // rebase the name in the archive that is being copied to match the
- // originally requested name.
- rebaseName = filepath.Base(path)
- }
- return resolvedPath, rebaseName
-}
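CopyResource ties the helpers in this file together: resolve the source, tar it, resolve the destination, rebase entries if needed, and untar. A minimal usage sketch with hypothetical paths:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy /tmp/src/config.json to /tmp/dst/, following symlinks in the
	// source path. The destination's parent directory must already exist.
	if err := archive.CopyResource("/tmp/src/config.json", "/tmp/dst/", true); err != nil {
		log.Fatal(err)
	}
}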
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
deleted file mode 100644
index e305b5e4a..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
- "path/filepath"
-)
-
-func normalizePath(path string) string {
- return filepath.ToSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
deleted file mode 100644
index 2b775b45c..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package archive
-
-import (
- "path/filepath"
-)
-
-func normalizePath(path string) string {
- return filepath.FromSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
deleted file mode 100644
index 019facd38..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/diff.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/pools"
- "github.com/docker/docker/pkg/system"
- "github.com/sirupsen/logrus"
-)
-
-// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
- tr := tar.NewReader(layer)
- trBuf := pools.BufioReader32KPool.Get(tr)
- defer pools.BufioReader32KPool.Put(trBuf)
-
- var dirs []*tar.Header
- unpackedPaths := make(map[string]struct{})
-
- if options == nil {
- options = &TarOptions{}
- }
- if options.ExcludePatterns == nil {
- options.ExcludePatterns = []string{}
- }
- idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
-
- aufsTempdir := ""
- aufsHardlinks := make(map[string]*tar.Header)
-
- // Iterate through the files in the archive.
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- // end of tar archive
- break
- }
- if err != nil {
- return 0, err
- }
-
- size += hdr.Size
-
- // Normalize name, for safety and for a simple is-root check
- hdr.Name = filepath.Clean(hdr.Name)
-
- // Windows does not support filenames with colons in them. Ignore
- // these files. This is not a problem though (although it might
- // appear that it is). Let's suppose a client is running docker pull.
- // The daemon it points to is Windows. Would it make sense for the
- // client to be doing a docker pull Ubuntu for example (which has files
- // with colons in the name under /usr/share/man/man3)? No, absolutely
- // not as it would really only make sense that they were pulling a
- // Windows image. However, for development, it is necessary to be able
- // to pull Linux images which are in the repository.
- //
- // TODO Windows. Once the registry is aware of what images are Windows-
- // specific or Linux-specific, this warning should be changed to an error
- // to cater for the situation where someone does manage to upload a Linux
- // image but have it tagged as Windows inadvertently.
- if runtime.GOOS == "windows" {
- if strings.Contains(hdr.Name, ":") {
- logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
- continue
- }
- }
-
-		// These operations are platform-specific, so the slash must be too.
- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
- // Not the root directory, ensure that the parent directory exists.
- // This happened in some tests where an image had a tarfile without any
- // parent directories.
- parent := filepath.Dir(hdr.Name)
- parentPath := filepath.Join(dest, parent)
-
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = system.MkdirAll(parentPath, 0600, "")
- if err != nil {
- return 0, err
- }
- }
- }
-
- // Skip AUFS metadata dirs
- if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
- // Regular files inside /.wh..wh.plnk can be used as hardlink targets
-		// We don't want this directory, but we need the files in it so that
- // such hardlinks can be resolved.
- if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
- basename := filepath.Base(hdr.Name)
- aufsHardlinks[basename] = hdr
- if aufsTempdir == "" {
- if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
- return 0, err
- }
- defer os.RemoveAll(aufsTempdir)
- }
- if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
- return 0, err
- }
- }
-
- if hdr.Name != WhiteoutOpaqueDir {
- continue
- }
- }
- path := filepath.Join(dest, hdr.Name)
- rel, err := filepath.Rel(dest, path)
- if err != nil {
- return 0, err
- }
-
-		// These operations are platform-specific, so the slash must be too.
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
- }
- base := filepath.Base(path)
-
- if strings.HasPrefix(base, WhiteoutPrefix) {
- dir := filepath.Dir(path)
- if base == WhiteoutOpaqueDir {
- _, err := os.Lstat(dir)
- if err != nil {
- return 0, err
- }
- err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- if os.IsNotExist(err) {
- err = nil // parent was deleted
- }
- return err
- }
- if path == dir {
- return nil
- }
- if _, exists := unpackedPaths[path]; !exists {
- err := os.RemoveAll(path)
- return err
- }
- return nil
- })
- if err != nil {
- return 0, err
- }
- } else {
- originalBase := base[len(WhiteoutPrefix):]
- originalPath := filepath.Join(dir, originalBase)
- if err := os.RemoveAll(originalPath); err != nil {
- return 0, err
- }
- }
- } else {
-			// If path exists we almost always just want to remove and replace it.
- // The only exception is when it is a directory *and* the file from
- // the layer is also a directory. Then we want to merge them (i.e.
- // just apply the metadata from the layer).
- if fi, err := os.Lstat(path); err == nil {
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
- if err := os.RemoveAll(path); err != nil {
- return 0, err
- }
- }
- }
-
- trBuf.Reset(tr)
- srcData := io.Reader(trBuf)
- srcHdr := hdr
-
- // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
- // we manually retarget these into the temporary files we extracted them into
- if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
- linkBasename := filepath.Base(hdr.Linkname)
- srcHdr = aufsHardlinks[linkBasename]
- if srcHdr == nil {
- return 0, fmt.Errorf("Invalid aufs hardlink")
- }
- tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
- if err != nil {
- return 0, err
- }
- defer tmpFile.Close()
- srcData = tmpFile
- }
-
- if err := remapIDs(idMappings, srcHdr); err != nil {
- return 0, err
- }
-
- if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
- return 0, err
- }
-
- // Directory mtimes must be handled at the end to avoid further
- // file creation in them to modify the directory mtime
- if hdr.Typeflag == tar.TypeDir {
- dirs = append(dirs, hdr)
- }
- unpackedPaths[path] = struct{}{}
- }
- }
-
- for _, hdr := range dirs {
- path := filepath.Join(dest, hdr.Name)
- if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
- return 0, err
- }
- }
-
- return size, nil
-}
-
-// ApplyLayer parses a diff in the standard layer format from `layer`,
-// and applies it to the directory `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer io.Reader) (int64, error) {
- return applyLayerHandler(dest, layer, &TarOptions{}, true)
-}
-
-// ApplyUncompressedLayer parses a diff in the standard layer format from
-// `layer`, and applies it to the directory `dest`. The stream `layer`
-// can only be uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
- return applyLayerHandler(dest, layer, options, false)
-}
-
-// applyLayerHandler does the bulk of ApplyLayer's work, but allows for not calling DecompressStream
-func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
- dest = filepath.Clean(dest)
-
- // We need to be able to set any perms
- oldmask, err := system.Umask(0)
- if err != nil {
- return 0, err
- }
- defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
-
- if decompress {
- layer, err = DecompressStream(layer)
- if err != nil {
- return 0, err
- }
- }
- return UnpackLayer(dest, layer, options)
-}
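A minimal usage sketch of ApplyLayer with hypothetical paths; the layer stream may be compressed or uncompressed:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// A layer diff previously produced by ExportChanges (possibly compressed).
	f, err := os.Open("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Apply it onto an extracted parent filesystem; whiteout entries in the
	// stream delete the corresponding paths instead of being written out.
	size, err := archive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes", size)
}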
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
deleted file mode 100644
index 495db809e..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/docker/docker/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "docker-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
-		// Assign the outer oldDir; using := here would shadow it.
-		var err error
-		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
-	a, err := archive.ExportChanges(newDir, changes, nil, nil)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
deleted file mode 100644
index 3448569b1..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package archive
-
-import (
- "syscall"
- "time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
- if time.IsZero() {
- // Return UTIME_OMIT special value
- ts.Sec = 0
- ts.Nsec = ((1 << 30) - 2)
- return
- }
- return syscall.NsecToTimespec(time.UnixNano())
-}
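The (1 << 30) - 2 nanosecond value is the kernel's UTIME_OMIT sentinel: a timespec carrying it tells utimensat(2) to leave that timestamp unchanged, which is how a zero time.Time becomes a no-op here. A sketch of consuming such a value directly via golang.org/x/sys/unix, with a hypothetical path:

package main

import (
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	// Set only the modification time; leave atime untouched via UTIME_OMIT.
	atime := unix.Timespec{Sec: 0, Nsec: unix.UTIME_OMIT}
	mtime := unix.NsecToTimespec(time.Now().UnixNano())
	_ = unix.UtimesNanoAt(unix.AT_FDCWD, "/tmp/somefile",
		[]unix.Timespec{atime, mtime}, unix.AT_SYMLINK_NOFOLLOW)
}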
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
deleted file mode 100644
index e85aac054..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux
-
-package archive
-
-import (
- "syscall"
- "time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
- nsec := int64(0)
- if !time.IsZero() {
- nsec = time.UnixNano()
- }
- return syscall.NsecToTimespec(nsec)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
deleted file mode 100644
index d20478a10..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package archive
-
-// Whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
-// filename this means that file has been removed from the base layer.
-const WhiteoutPrefix = ".wh."
-
-// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for removing an actual file. Normally these files are excluded from exported
-// archives.
-const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
-
-// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
-// layers. Normally these should not go into exported archives and all changed
-// hardlinks should be copied to the top layer.
-const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
-
-// WhiteoutOpaqueDir file means directory has been made opaque - meaning
-// readdir calls to this directory do not follow to lower layers.
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
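Concretely, deleting a/b/file yields a tar entry a/b/.wh.file, and an opaque directory a/b carries a/b/.wh..wh..opq. A small sketch of decoding entry names under these conventions (constants restated locally so the example is self-contained):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const (
	WhiteoutPrefix     = ".wh."
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
	WhiteoutOpaqueDir  = WhiteoutMetaPrefix + ".opq"
)

// decodeWhiteout reports what a tar entry name means for extraction.
func decodeWhiteout(name string) string {
	base := filepath.Base(name)
	switch {
	case base == WhiteoutOpaqueDir:
		return "make " + filepath.Dir(name) + " opaque"
	case strings.HasPrefix(base, WhiteoutMetaPrefix):
		return "AUFS metadata, skip"
	case strings.HasPrefix(base, WhiteoutPrefix):
		return "delete " + filepath.Join(filepath.Dir(name), base[len(WhiteoutPrefix):])
	default:
		return "extract " + name
	}
}

func main() {
	fmt.Println(decodeWhiteout("a/b/.wh.file"))     // delete a/b/file
	fmt.Println(decodeWhiteout("a/b/.wh..wh..opq")) // make a/b opaque
}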
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
deleted file mode 100644
index b39d12c87..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/wrap.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "bytes"
- "io"
-)
-
-// Generate generates a new archive from the content provided
-// as input.
-//
-// `files` is a sequence of path/content pairs. A new file is
-// added to the archive for each pair.
-// If the last pair is incomplete, the file is created with
-// empty content. For example:
-//
-// Generate("foo.txt", "hello world", "emptyfile")
-//
-// The above call will return an archive with 2 files:
-// * ./foo.txt with content "hello world"
-// * ./emptyfile with empty content
-//
-// FIXME: stream content instead of buffering
-// FIXME: specify permissions and other archive metadata
-func Generate(input ...string) (io.Reader, error) {
- files := parseStringPairs(input...)
- buf := new(bytes.Buffer)
- tw := tar.NewWriter(buf)
- for _, file := range files {
- name, content := file[0], file[1]
- hdr := &tar.Header{
- Name: name,
- Size: int64(len(content)),
- }
- if err := tw.WriteHeader(hdr); err != nil {
- return nil, err
- }
- if _, err := tw.Write([]byte(content)); err != nil {
- return nil, err
- }
- }
- if err := tw.Close(); err != nil {
- return nil, err
- }
- return buf, nil
-}
-
-func parseStringPairs(input ...string) (output [][2]string) {
- output = make([][2]string, 0, len(input)/2+1)
- for i := 0; i < len(input); i += 2 {
- var pair [2]string
- pair[0] = input[i]
- if i+1 < len(input) {
- pair[1] = input[i+1]
- }
- output = append(output, pair)
- }
- return
-}
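A short usage sketch of Generate, writing the in-memory tar to a hypothetical file:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two entries: foo.txt with content, emptyfile with empty content.
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Create("/tmp/out.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, r); err != nil {
		log.Fatal(err)
	}
}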
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
deleted file mode 100644
index 6cfa46483..000000000
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package jsonmessage
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "os"
- "strings"
- "time"
-
- gotty "github.com/Nvveen/Gotty"
- "github.com/docker/docker/pkg/term"
- units "github.com/docker/go-units"
-)
-
-// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
-// ensure the formatted time is always the same number of characters.
-const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-
-// JSONError wraps a concrete Code and Message. `Code` is
-// an integer error code, `Message` is the error message.
-type JSONError struct {
- Code int `json:"code,omitempty"`
- Message string `json:"message,omitempty"`
-}
-
-func (e *JSONError) Error() string {
- return e.Message
-}
-
-// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
-// Start is the initial value for the operation. Current is the current status and
-// value of the progress made towards Total. Total is the end value describing when
-// we made 100% progress for an operation.
-type JSONProgress struct {
- terminalFd uintptr
- Current int64 `json:"current,omitempty"`
- Total int64 `json:"total,omitempty"`
- Start int64 `json:"start,omitempty"`
- // If true, don't show xB/yB
- HideCounts bool `json:"hidecounts,omitempty"`
- Units string `json:"units,omitempty"`
-}
-
-func (p *JSONProgress) String() string {
- var (
- width = 200
- pbBox string
- numbersBox string
- timeLeftBox string
- )
-
- ws, err := term.GetWinsize(p.terminalFd)
- if err == nil {
- width = int(ws.Width)
- }
-
- if p.Current <= 0 && p.Total <= 0 {
- return ""
- }
- if p.Total <= 0 {
- switch p.Units {
- case "":
- current := units.HumanSize(float64(p.Current))
- return fmt.Sprintf("%8v", current)
- default:
- return fmt.Sprintf("%d %s", p.Current, p.Units)
- }
- }
-
- percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
- if percentage > 50 {
- percentage = 50
- }
- if width > 110 {
- // this number can't be negative gh#7136
- numSpaces := 0
- if 50-percentage > 0 {
- numSpaces = 50 - percentage
- }
- pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
- }
-
- switch {
- case p.HideCounts:
- case p.Units == "": // no units, use bytes
- current := units.HumanSize(float64(p.Current))
- total := units.HumanSize(float64(p.Total))
-
- numbersBox = fmt.Sprintf("%8v/%v", current, total)
-
- if p.Current > p.Total {
- // remove total display if the reported current is wonky.
- numbersBox = fmt.Sprintf("%8v", current)
- }
- default:
- numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
-
- if p.Current > p.Total {
- // remove total display if the reported current is wonky.
- numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
- }
- }
-
- if p.Current > 0 && p.Start > 0 && percentage < 50 {
- fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
- perEntry := fromStart / time.Duration(p.Current)
- left := time.Duration(p.Total-p.Current) * perEntry
- left = (left / time.Second) * time.Second
-
- if width > 50 {
- timeLeftBox = " " + left.String()
- }
- }
- return pbBox + numbersBox + timeLeftBox
-}
-
-// JSONMessage defines a message struct. It describes
-// the created time, where it comes from, the status, and
-// the ID of the message. It's used for docker events.
-type JSONMessage struct {
- Stream string `json:"stream,omitempty"`
- Status string `json:"status,omitempty"`
- Progress *JSONProgress `json:"progressDetail,omitempty"`
- ProgressMessage string `json:"progress,omitempty"` //deprecated
- ID string `json:"id,omitempty"`
- From string `json:"from,omitempty"`
- Time int64 `json:"time,omitempty"`
- TimeNano int64 `json:"timeNano,omitempty"`
- Error *JSONError `json:"errorDetail,omitempty"`
- ErrorMessage string `json:"error,omitempty"` //deprecated
- // Aux contains out-of-band data, such as digests for push signing and image id after building.
- Aux *json.RawMessage `json:"aux,omitempty"`
-}
-
-/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
-type termInfo interface {
- Parse(attr string, params ...interface{}) (string, error)
-}
-
-type noTermInfo struct{} // canary used when no terminfo.
-
-func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
- return "", fmt.Errorf("noTermInfo")
-}
-
-func clearLine(out io.Writer, ti termInfo) {
- // el2 (clear whole line) is not exposed by terminfo.
-
- // First clear line from beginning to cursor
- if attr, err := ti.Parse("el1"); err == nil {
- fmt.Fprintf(out, "%s", attr)
- } else {
- fmt.Fprintf(out, "\x1b[1K")
- }
- // Then clear line from cursor to end
- if attr, err := ti.Parse("el"); err == nil {
- fmt.Fprintf(out, "%s", attr)
- } else {
- fmt.Fprintf(out, "\x1b[K")
- }
-}
-
-func cursorUp(out io.Writer, ti termInfo, l int) {
- if l == 0 { // Should never be the case, but be tolerant
- return
- }
- if attr, err := ti.Parse("cuu", l); err == nil {
- fmt.Fprintf(out, "%s", attr)
- } else {
- fmt.Fprintf(out, "\x1b[%dA", l)
- }
-}
-
-func cursorDown(out io.Writer, ti termInfo, l int) {
- if l == 0 { // Should never be the case, but be tolerant
- return
- }
- if attr, err := ti.Parse("cud", l); err == nil {
- fmt.Fprintf(out, "%s", attr)
- } else {
- fmt.Fprintf(out, "\x1b[%dB", l)
- }
-}
-
-// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
-// is a terminal. If this is the case, it will erase the entire current line
-// when displaying the progressbar.
-func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
- if jm.Error != nil {
- if jm.Error.Code == 401 {
- return fmt.Errorf("authentication is required")
- }
- return jm.Error
- }
- var endl string
- if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
- clearLine(out, termInfo)
- endl = "\r"
- fmt.Fprintf(out, endl)
- } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
- return nil
- }
- if jm.TimeNano != 0 {
- fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
- } else if jm.Time != 0 {
- fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
- }
- if jm.ID != "" {
- fmt.Fprintf(out, "%s: ", jm.ID)
- }
- if jm.From != "" {
- fmt.Fprintf(out, "(from %s) ", jm.From)
- }
- if jm.Progress != nil && termInfo != nil {
- fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
- } else if jm.ProgressMessage != "" { //deprecated
- fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
- } else if jm.Stream != "" {
- fmt.Fprintf(out, "%s%s", jm.Stream, endl)
- } else {
- fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
- }
- return nil
-}
-
-// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`; `isTerminal`
-// describes whether `out` is a terminal. If it is, the function prints `\n` at the end of
-// each line and moves the cursor while displaying.
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
- var (
- dec = json.NewDecoder(in)
- ids = make(map[string]int)
- )
-
- var termInfo termInfo
-
- if isTerminal {
- term := os.Getenv("TERM")
- if term == "" {
- term = "vt102"
- }
-
- var err error
- if termInfo, err = gotty.OpenTermInfo(term); err != nil {
- termInfo = &noTermInfo{}
- }
- }
-
- for {
- diff := 0
- var jm JSONMessage
- if err := dec.Decode(&jm); err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
-
- if jm.Aux != nil {
- if auxCallback != nil {
- auxCallback(jm.Aux)
- }
- continue
- }
-
- if jm.Progress != nil {
- jm.Progress.terminalFd = terminalFd
- }
- if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
- line, ok := ids[jm.ID]
- if !ok {
-			// NOTE: This approach of using len(ids) to
- // figure out the number of lines of history
- // only works as long as we clear the history
- // when we output something that's not
- // accounted for in the map, such as a line
- // with no ID.
- line = len(ids)
- ids[jm.ID] = line
- if termInfo != nil {
- fmt.Fprintf(out, "\n")
- }
- }
- diff = len(ids) - line
- if termInfo != nil {
- cursorUp(out, termInfo, diff)
- }
- } else {
- // When outputting something that isn't progress
- // output, clear the history of previous lines. We
- // don't want progress entries from some previous
- // operation to be updated (for example, pull -a
- // with multiple tags).
- ids = make(map[string]int)
- }
- err := jm.Display(out, termInfo)
- if jm.ID != "" && termInfo != nil {
- cursorDown(out, termInfo, diff)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-type stream interface {
- io.Writer
- FD() uintptr
- IsTerminal() bool
-}
-
-// DisplayJSONMessagesToStream prints JSON messages to the output stream
-func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
- return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
-}
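A minimal usage sketch of DisplayJSONMessagesStream feeding two hand-written messages to a non-terminal writer; progress bars are suppressed and each status prints on its own line:

package main

import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	// Two docker-style progress messages on one stream.
	in := strings.NewReader(
		`{"id":"layer1","status":"Downloading","progressDetail":{"current":1,"total":10}}` + "\n" +
			`{"id":"layer1","status":"Download complete"}` + "\n")

	// isTerminal is false here, so the progress bar for the first
	// message is skipped and only the final status is printed.
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, os.Stdout.Fd(), false, nil); err != nil {
		log.Fatal(err)
	}
}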
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go
deleted file mode 100644
index 32f22bb37..000000000
--- a/vendor/github.com/docker/docker/profiles/seccomp/generate.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build ignore
-
-package main
-
-import (
- "encoding/json"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/docker/docker/profiles/seccomp"
-)
-
-// saves the default seccomp profile as a json file so people can use it as a
-// base for their own custom profiles
-func main() {
- wd, err := os.Getwd()
- if err != nil {
- panic(err)
- }
- f := filepath.Join(wd, "default.json")
-
- // write the default profile to the file
- b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t")
- if err != nil {
- panic(err)
- }
-
- if err := ioutil.WriteFile(f, b, 0644); err != nil {
- panic(err)
- }
-}
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
index 4d5f5ae63..bb7e4e336 100644
--- a/vendor/github.com/docker/go-connections/nat/nat.go
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) {
}
func validateProto(proto string) bool {
- for _, availableProto := range []string{"tcp", "udp"} {
+ for _, availableProto := range []string{"tcp", "udp", "sctp"} {
if availableProto == proto {
return true
}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
index 9ca974539..1ff81c333 100644
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -4,7 +4,6 @@ package tlsconfig
import (
"crypto/x509"
-
)
// SystemCertPool returns a new empty cert pool,
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
index 1b31bbb8b..0ef3fdcb4 100644
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
@@ -65,22 +63,34 @@ var allTLSVersions = map[uint16]struct{}{
}
// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
-func ServerDefault() *tls.Config {
- return &tls.Config{
- // Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+ tlsconfig := &tls.Config{
+ // Avoid fallback by default to SSL protocols < TLS1.2
+ MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
CipherSuites: DefaultServerAcceptedCiphers,
}
+
+ for _, op := range ops {
+ op(tlsconfig)
+ }
+
+ return tlsconfig
}
// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
-func ClientDefault() *tls.Config {
- return &tls.Config{
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+ tlsconfig := &tls.Config{
// Prefer TLS1.2 as the client minimum
MinVersion: tls.VersionTLS12,
CipherSuites: clientCipherSuites,
}
+
+ for _, op := range ops {
+ op(tlsconfig)
+ }
+
+ return tlsconfig
}
// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
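The variadic ops parameter added here turns ServerDefault and ClientDefault into functional-option constructors: callers keep the hardened defaults and override individual fields. A usage sketch:

package main

import (
	"crypto/tls"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Start from the hardened server defaults (TLS 1.2 minimum) but
	// allow TLS 1.1 for one legacy listener.
	cfg := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS11
	})
	_ = cfg
}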
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
index df27d1dd1..2023ecf84 100644
--- a/vendor/github.com/docker/spdystream/connection.go
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -14,7 +14,7 @@ import (
var (
ErrInvalidStreamId = errors.New("Invalid stream id")
- ErrTimeout = errors.New("Timeout occured")
+ ErrTimeout = errors.New("Timeout occurred")
ErrReset = errors.New("Stream reset")
ErrWriteClosedStream = errors.New("Write on closed stream")
)
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
index b59fa5fdc..d4ee7be81 100644
--- a/vendor/github.com/docker/spdystream/handlers.go
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -30,9 +30,7 @@ func MirrorStreamHandler(stream *Stream) {
}()
}
-// NoopStreamHandler does nothing when stream connects, most
-// likely used with RejectAuthHandler which will not allow any
-// streams to make it to the stream handler.
+// NoopStreamHandler does nothing when stream connects.
func NoOpStreamHandler(stream *Stream) {
stream.SendReply(http.Header{}, false)
}