path: root/vendor/github.com/fsouza/go-dockerclient/internal
author     Valentin Rothberg <rothberg@redhat.com>  2019-01-08 14:52:57 +0100
committer  Valentin Rothberg <rothberg@redhat.com>  2019-01-11 13:38:11 +0100
commit     bd40dcfc2bc7c9014ea1f33482fb63aacbcdfe87 (patch)
tree       5f06e4e289f16d9164d692590a3fe6541b5384cf /vendor/github.com/fsouza/go-dockerclient/internal
parent     545f24421247c9f6251a634764db3f8f8070a812 (diff)
download   podman-bd40dcfc2bc7c9014ea1f33482fb63aacbcdfe87.tar.gz
           podman-bd40dcfc2bc7c9014ea1f33482fb63aacbcdfe87.tar.bz2
           podman-bd40dcfc2bc7c9014ea1f33482fb63aacbcdfe87.zip
vendor: update everything
* If possible, update each dependency to the latest available version.
* Use releases over commit IDs and avoid vendoring branches.

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Diffstat (limited to 'vendor/github.com/fsouza/go-dockerclient/internal')
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go            505
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go      104
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go       11
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go        77
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go     71
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go        16
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go     11
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go                29
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go           27
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go    339
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/term/term.go                   13
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go                16
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go        22
13 files changed, 1241 insertions, 0 deletions
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
new file mode 100644
index 000000000..a13ee7cca
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
@@ -0,0 +1,505 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // Uncompressed represents no compression.
+ Uncompressed Compression = iota
+ // Bzip2 is the bzip2 compression algorithm.
+ Bzip2
+ // Gzip is the gzip compression algorithm.
+ Gzip
+ // Xz is the xz compression algorithm.
+ Xz
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// Compression indicates whether an archive is compressed, and with which algorithm.
+type Compression int
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// WhiteoutFormat is the format of whiteout files to be packed or unpacked.
+type WhiteoutFormat int
+
+// TarOptions wraps the tar options.
+type TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.IDPair
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir then we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// CompressStream wraps dest in a writer that applies the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IDMappings *idtools.IDMappings
+ ChownOpts *idtools.IDPair
+
+ // For packing and unpacking whiteout files in the
+ // non-standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IDMappings: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // check whether the file is an overlayfs whiteout;
+ // if so, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host.
+ if !isOverlayWhiteout &&
+ !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
+ !ta.IDMappings.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note that the original
+ // hdr may itself have been updated to a whiteout in addition to
+ // the whiteout header being returned.
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ReadSecurityXattrToTarHeader reads the security.capability xattr from the
+// filesystem into a tar header.
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to the archive/tar package, this function fills in more information.
+// Also, regardless of Go version, this function fills in the file type bits (e.g. hdr.Mode |= modeISDIR),
+// which have been removed from archive/tar since Go 1.9.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills in the file type bits that were removed from archive/tar in Go 1.9
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// canonicalTarName provides a platform-independent and consistent POSIX-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
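For orientation, here is a minimal, hypothetical caller of the exported helpers above. It assumes code living inside go-dockerclient, since the package is internal and cannot be imported from elsewhere; TarWithOptions streams the directory as a gzip-compressed tar while honoring exclude patterns:

package example

import (
	"io"

	"github.com/fsouza/go-dockerclient/internal/archive"
)

// streamContext tars up a build-context directory and copies the
// gzip-compressed stream to out (for example, an HTTP request body).
func streamContext(contextDir string, out io.Writer) error {
	rc, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{".git", "*.tmp"}, // hypothetical patterns
	})
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(out, rc)
	return err
}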
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
new file mode 100644
index 000000000..9e1f3f2f1
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
@@ -0,0 +1,104 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go
new file mode 100644
index 000000000..72822c857
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go
@@ -0,0 +1,11 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
new file mode 100644
index 000000000..2633f5020
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
@@ -0,0 +1,77 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/idtools"
+ "golang.org/x/sys/unix"
+)
+
+// CanonicalTarNameForPath converts a platform-specific file path
+// into a canonical POSIX-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// fixVolumePathPrefix does platform-specific processing to ensure that if
+// the path being passed in is not in a volume path format, it is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = uint64(s.Ino)
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently go does not fill in the major/minors
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
+}
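The comment on getWalkRoot is easy to verify in isolation: filepath.Join cleans its result and would silently drop a trailing ".", which is exactly the component TarWithOptions relies on when archiving a whole directory. A standalone sketch:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Join cleans the joined path and drops the trailing ".".
	fmt.Println(filepath.Join("/src", ".")) // /src

	// Plain concatenation, as getWalkRoot does on Unix, keeps it.
	fmt.Println("/src" + string(filepath.Separator) + ".") // /src/.
}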
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
new file mode 100644
index 000000000..c14875cd7
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// CanonicalTarNameForPath converts a platform-specific file path
+// into a canonical POSIX-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert a Windows-style relative path with backslashes
+ // into forward slashes. Since Windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace; however, we must
+ // check just in case.
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+
+}
+
+// fixVolumePathPrefix does platform-specific processing to ensure that if
+// the path being passed in is not in a volume path format, it is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.IDPair{0, 0}, nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header based
+// on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go
new file mode 100644
index 000000000..39ea287bf
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+)
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go
new file mode 100644
index 000000000..a93130474
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go
@@ -0,0 +1,11 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+import "os"
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go
new file mode 100644
index 000000000..45d45f20e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go
@@ -0,0 +1,29 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
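A short, hypothetical illustration of SplitPathDirEntry's behavior on a Unix host (again assuming an importer inside go-dockerclient, since the package is internal):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/internal/archive"
)

func main() {
	// Ordinary path: plain directory/basename split after cleaning.
	dir, base := archive.SplitPathDirEntry("/var/lib/app/config.json")
	fmt.Println(dir, base) // /var/lib/app config.json

	// A path that explicitly names the current directory keeps the
	// trailing "." as the basename.
	dir, base = archive.SplitPathDirEntry("/var/lib/app/.")
	fmt.Println(dir, base) // /var/lib/app .
}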
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go
new file mode 100644
index 000000000..a61c22a08
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go
@@ -0,0 +1,27 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
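To make the naming concrete: deleting etc/passwd in an upper layer appears in an exported archive as a zero-byte entry named etc/.wh.passwd, and an opaque directory etc/ carries a marker entry etc/.wh..wh..opq. A small, hypothetical helper (not part of the package) that classifies such entry names:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Mirrors the constants defined in whiteouts.go above.
const (
	whiteoutPrefix     = ".wh."
	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
	whiteoutOpaqueDir  = whiteoutMetaPrefix + ".opq"
)

// classify reports what a tar entry name means in a layer archive.
func classify(name string) string {
	base := filepath.Base(name)
	switch {
	case base == whiteoutOpaqueDir:
		return "opaque-dir marker for " + filepath.Dir(name)
	case strings.HasPrefix(base, whiteoutMetaPrefix):
		return "whiteout metadata (not a deletion)"
	case strings.HasPrefix(base, whiteoutPrefix):
		return "deletion of " + filepath.Join(filepath.Dir(name), strings.TrimPrefix(base, whiteoutPrefix))
	default:
		return "regular entry"
	}
}

func main() {
	fmt.Println(classify("etc/.wh.passwd"))   // deletion of etc/passwd
	fmt.Println(classify("etc/.wh..wh..opq")) // opaque-dir marker for etc
	fmt.Println(classify("etc/hosts"))        // regular entry
}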
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..71b3395ce
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
@@ -0,0 +1,339 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package jsonmessage
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/Nvveen/Gotty"
+ "github.com/docker/go-units"
+ "github.com/fsouza/go-dockerclient/internal/term"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message. `Code` is
+// an integer error code, `Message` is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+ // If true, don't show xB/yB
+ HideCounts bool `json:"hidecounts,omitempty"`
+ Units string `json:"units,omitempty"`
+ nowFunc func() time.Time
+ winSize int
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = p.width()
+ pbBox string
+ numbersBox string
+ timeLeftBox string
+ )
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ if p.Total <= 0 {
+ switch p.Units {
+ case "":
+ current := units.HumanSize(float64(p.Current))
+ return fmt.Sprintf("%8v", current)
+ default:
+ return fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative gh#7136
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ switch {
+ case p.HideCounts:
+ case p.Units == "": // no units, use bytes
+ current := units.HumanSize(float64(p.Current))
+ total := units.HumanSize(float64(p.Total))
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+ default:
+ numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := p.now().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ left = (left / time.Second) * time.Second
+
+ if width > 50 {
+ timeLeftBox = " " + left.String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// shim for testing
+func (p *JSONProgress) now() time.Time {
+ if p.nowFunc == nil {
+ p.nowFunc = func() time.Time {
+ return time.Now().UTC()
+ }
+ }
+ return p.nowFunc()
+}
+
+// shim for testing
+func (p *JSONProgress) width() int {
+ if p.winSize != 0 {
+ return p.winSize
+ }
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ return int(ws.Width)
+ }
+ return 200
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it is from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+ // Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
+type termInfo interface {
+ Parse(attr string, params ...interface{}) (string, error)
+}
+
+type noTermInfo struct{} // canary used when no terminfo.
+
+func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
+ return "", fmt.Errorf("noTermInfo")
+}
+
+func clearLine(out io.Writer, ti termInfo) {
+ // el2 (clear whole line) is not exposed by terminfo.
+
+ // First clear line from beginning to cursor
+ if attr, err := ti.Parse("el1"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[1K")
+ }
+ // Then clear line from cursor to end
+ if attr, err := ti.Parse("el"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[K")
+ }
+}
+
+func cursorUp(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cuu", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dA", l)
+ }
+}
+
+func cursorDown(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cud", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dB", l)
+ }
+}
+
+// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
+ if jm.Error != nil {
+ if jm.Error.Code == 401 {
+ return fmt.Errorf("authentication is required")
+ }
+ return jm.Error
+ }
+ var endl string
+ if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out, termInfo)
+ endl = "\r"
+ fmt.Fprintf(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && termInfo != nil {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`. `isTerminal`
+// describes whether `out` is a terminal. If it is, the function prints `\n` at the end of
+// each line and moves the cursor while displaying.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]int)
+ )
+
+ var termInfo termInfo
+
+ if isTerminal {
+ term := os.Getenv("TERM")
+ if term == "" {
+ term = "vt102"
+ }
+
+ var err error
+ if termInfo, err = gotty.OpenTermInfo(term); err != nil {
+ termInfo = &noTermInfo{}
+ }
+ }
+
+ for {
+ diff := 0
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(id) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = len(ids)
+ ids[jm.ID] = line
+ if termInfo != nil {
+ fmt.Fprintf(out, "\n")
+ }
+ }
+ diff = len(ids) - line
+ if termInfo != nil {
+ cursorUp(out, termInfo, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]int)
+ }
+ err := jm.Display(out, termInfo)
+ if jm.ID != "" && termInfo != nil {
+ cursorDown(out, termInfo, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output stream
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error {
+ return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
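A minimal sketch of driving DisplayJSONMessagesStream with a hand-written stream (hypothetical input; in go-dockerclient the reader is normally the daemon's HTTP response body for build/pull/push). With isTerminal set to false, progress bars are suppressed and only the status lines are printed:

package main

import (
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/internal/jsonmessage"
)

func main() {
	// Two messages in the daemon's streaming JSON format: a progress
	// update and a plain status line.
	in := strings.NewReader(
		`{"id":"abc123","status":"Downloading","progressDetail":{"current":512,"total":1024}}` + "\n" +
			`{"id":"abc123","status":"Download complete"}` + "\n")

	// isTerminal=false: the progress bar is skipped and each status is
	// printed on its own line.
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, os.Stdout.Fd(), false, nil); err != nil {
		panic(err)
	}
}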
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
new file mode 100644
index 000000000..af06911d8
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
@@ -0,0 +1,13 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package term
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+ Height uint16
+ Width uint16
+ x uint16
+ y uint16
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
new file mode 100644
index 000000000..2a9964a0d
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
@@ -0,0 +1,16 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+// +build !windows
+
+package term
+
+import "golang.org/x/sys/unix"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
+ return ws, err
+}
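And a tiny usage sketch for GetWinsize on the Unix build (hypothetical caller; the call returns an error when the descriptor is not a terminal):

package main

import (
	"fmt"
	"os"

	"github.com/fsouza/go-dockerclient/internal/term"
)

func main() {
	ws, err := term.GetWinsize(os.Stdout.Fd())
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}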
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go
new file mode 100644
index 000000000..4a07a5d19
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go
@@ -0,0 +1,22 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package term
+
+import "github.com/Azure/go-ansiterm/winterm"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ winsize := &Winsize{
+ Width: uint16(info.Window.Right - info.Window.Left + 1),
+ Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+ }
+
+ return winsize, nil
+}