author    TomSweeneyRedHat <tsweeney@redhat.com>  2020-11-16 15:47:32 -0500
committer TomSweeneyRedHat <tsweeney@redhat.com>  2020-11-16 15:50:14 -0500
commit    b78a90cbdea05ea92e080f50f9f8fc8fa3cd4d68 (patch)
tree      2502a8a847e080d5b739eca29afbabe2ce0d98a2
parent    e59394973a7559feb42b89ea882f2ce52d0432b8 (diff)
Bump Buildah to v1.18.0, c/storage to v1.24.0
Update to Buildah v1.18.0 and c/storage to v1.24.0

Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
-rw-r--r--  go.mod | 4
-rw-r--r--  go.sum | 8
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE | 27
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/common.go | 344
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/reader.go | 1002
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go | 20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go | 20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go | 32
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/writer.go | 444
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/strconv.go | 68
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/tar.go | 60
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 32
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 32
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go | 11
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 2
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 6
-rw-r--r--  vendor/github.com/containers/buildah/import.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 3
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 71
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 2
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 2
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 41
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go | 61
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_other.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go | 10
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_windows.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/config/config.go | 13
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 33
-rw-r--r--  vendor/github.com/containers/storage/store.go | 3
-rw-r--r--  vendor/modules.txt | 7
37 files changed, 446 insertions(+), 1952 deletions(-)
diff --git a/go.mod b/go.mod
index e9dbd9925..c093319e8 100644
--- a/go.mod
+++ b/go.mod
@@ -10,12 +10,12 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.8.0
github.com/containernetworking/plugins v0.8.7
- github.com/containers/buildah v1.17.1-0.20201113135631-d0c958d65eb2
+ github.com/containers/buildah v1.18.0
github.com/containers/common v0.27.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.8.0
github.com/containers/psgo v1.5.1
- github.com/containers/storage v1.23.9
+ github.com/containers/storage v1.24.0
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cri-o/ocicni v0.2.1-0.20201102180012-75c612fda1a2
github.com/cyphar/filepath-securejoin v0.2.2
diff --git a/go.sum b/go.sum
index ea0a85d5a..baff472f6 100644
--- a/go.sum
+++ b/go.sum
@@ -26,6 +26,8 @@ github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcy
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
@@ -91,8 +93,8 @@ github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjM
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.7 h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M=
github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0=
-github.com/containers/buildah v1.17.1-0.20201113135631-d0c958d65eb2 h1:sYOJ4xbCJTQEhjQax649sE+iy8ZohxmLGP8pCTrnypY=
-github.com/containers/buildah v1.17.1-0.20201113135631-d0c958d65eb2/go.mod h1:+GBrGojiBt2/IXxKYMCVD02kLIxfe5KYMvCwBjhJkFk=
+github.com/containers/buildah v1.18.0 h1:mWEm013LVNGecF++sYo0T7fe/4pqMas/PQxQ/qviC68=
+github.com/containers/buildah v1.18.0/go.mod h1:qHLk7RUL7cHfA7ve1MKkZ6cyKUxHD0YxiLJcKY+mJe8=
github.com/containers/common v0.26.3/go.mod h1:hJWZIlrl5MsE2ELNRa+MPp6I1kPbXHauuj0Ym4BsLG4=
github.com/containers/common v0.27.0 h1:+QlYEOitVYtU9/x8xebRgxdGqt4sLaIqV6MBOns+zLk=
github.com/containers/common v0.27.0/go.mod h1:ZTswJJfu4aGF6Anyi2yON8Getda9NDYcdIzurOEHHXI=
@@ -111,6 +113,8 @@ github.com/containers/storage v1.23.6/go.mod h1:haFs0HRowKwyzvWEx9EgI3WsL8XCSnBD
github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
github.com/containers/storage v1.23.9 h1:qbgnTp76pLSyW3vYwY5GH4vk5cHYVXFJ+CsUEBp9TMw=
github.com/containers/storage v1.23.9/go.mod h1:3b2ktpB6pw53SEeIoFfO0sQfP9+IoJJKPq5iJk74gxE=
+github.com/containers/storage v1.24.0 h1:Fo2LkF7tkMLmo38sTZ/G8wHjcn8JfUFPfyTxM4WwMfk=
+github.com/containers/storage v1.24.0/go.mod h1:A4d3BzuZK9b3oLVEsiSRhZLPIx3z7utgiPyXLK/YMhY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
deleted file mode 100644
index 744875676..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
deleted file mode 100644
index 0378401c0..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
-//
-// References:
-// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-// http://www.gnu.org/software/tar/manual/html_node/Standard.html
-// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
-package tar
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path"
- "time"
-)
-
-const (
- blockSize = 512
-
- // Types
- TypeReg = '0' // regular file
- TypeRegA = '\x00' // regular file
- TypeLink = '1' // hard link
- TypeSymlink = '2' // symbolic link
- TypeChar = '3' // character device node
- TypeBlock = '4' // block device node
- TypeDir = '5' // directory
- TypeFifo = '6' // fifo node
- TypeCont = '7' // reserved
- TypeXHeader = 'x' // extended header
- TypeXGlobalHeader = 'g' // global extended header
- TypeGNULongName = 'L' // Next file has a long name
- TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
- TypeGNUSparse = 'S' // sparse file
-)
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-type Header struct {
- Name string // name of header file entry
- Mode int64 // permission and mode bits
- Uid int // user id of owner
- Gid int // group id of owner
- Size int64 // length in bytes
- ModTime time.Time // modified time
- Typeflag byte // type of header entry
- Linkname string // target name of link
- Uname string // user name of owner
- Gname string // group name of owner
- Devmajor int64 // major number of character or block device
- Devminor int64 // minor number of character or block device
- AccessTime time.Time // access time
- ChangeTime time.Time // status change time
- CreationTime time.Time // creation time
- Xattrs map[string]string
- Winheaders map[string]string
-}
-
-// File name constants from the tar spec.
-const (
- fileNameSize = 100 // Maximum number of bytes in a standard tar name.
- fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
-)
-
-// FileInfo returns an os.FileInfo for the Header.
-func (h *Header) FileInfo() os.FileInfo {
- return headerFileInfo{h}
-}
-
-// headerFileInfo implements os.FileInfo.
-type headerFileInfo struct {
- h *Header
-}
-
-func (fi headerFileInfo) Size() int64 { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{} { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
- if fi.IsDir() {
- return path.Base(path.Clean(fi.h.Name))
- }
- return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode os.FileMode) {
- // Set file permission bits.
- mode = os.FileMode(fi.h.Mode).Perm()
-
- // Set setuid, setgid and sticky bits.
- if fi.h.Mode&c_ISUID != 0 {
- // setuid
- mode |= os.ModeSetuid
- }
- if fi.h.Mode&c_ISGID != 0 {
- // setgid
- mode |= os.ModeSetgid
- }
- if fi.h.Mode&c_ISVTX != 0 {
- // sticky
- mode |= os.ModeSticky
- }
-
- // Set file mode bits.
- // clear perm, setuid, setgid and sticky bits.
- m := os.FileMode(fi.h.Mode) &^ 07777
- if m == c_ISDIR {
- // directory
- mode |= os.ModeDir
- }
- if m == c_ISFIFO {
- // named pipe (FIFO)
- mode |= os.ModeNamedPipe
- }
- if m == c_ISLNK {
- // symbolic link
- mode |= os.ModeSymlink
- }
- if m == c_ISBLK {
- // device file
- mode |= os.ModeDevice
- }
- if m == c_ISCHR {
- // Unix character device
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- }
- if m == c_ISSOCK {
- // Unix domain socket
- mode |= os.ModeSocket
- }
-
- switch fi.h.Typeflag {
- case TypeSymlink:
- // symbolic link
- mode |= os.ModeSymlink
- case TypeChar:
- // character device node
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- case TypeBlock:
- // block device node
- mode |= os.ModeDevice
- case TypeDir:
- // directory
- mode |= os.ModeDir
- case TypeFifo:
- // fifo node
- mode |= os.ModeNamedPipe
- }
-
- return mode
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, h *Header) error
-
-// Mode constants from the tar spec.
-const (
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
- c_ISDIR = 040000 // Directory
- c_ISFIFO = 010000 // FIFO
- c_ISREG = 0100000 // Regular file
- c_ISLNK = 0120000 // Symbolic link
- c_ISBLK = 060000 // Block special file
- c_ISCHR = 020000 // Character special file
- c_ISSOCK = 0140000 // Socket
-)
-
-// Keywords for the PAX Extended Header
-const (
- paxAtime = "atime"
- paxCharset = "charset"
- paxComment = "comment"
- paxCtime = "ctime" // please note that ctime is not a valid pax header.
- paxCreationTime = "LIBARCHIVE.creationtime"
- paxGid = "gid"
- paxGname = "gname"
- paxLinkpath = "linkpath"
- paxMtime = "mtime"
- paxPath = "path"
- paxSize = "size"
- paxUid = "uid"
- paxUname = "uname"
- paxXattr = "SCHILY.xattr."
- paxWindows = "MSWINDOWS."
- paxNone = ""
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
- if fi == nil {
- return nil, errors.New("tar: FileInfo is nil")
- }
- fm := fi.Mode()
- h := &Header{
- Name: fi.Name(),
- ModTime: fi.ModTime(),
- Mode: int64(fm.Perm()), // or'd with c_IS* constants later
- }
- switch {
- case fm.IsRegular():
- h.Mode |= c_ISREG
- h.Typeflag = TypeReg
- h.Size = fi.Size()
- case fi.IsDir():
- h.Typeflag = TypeDir
- h.Mode |= c_ISDIR
- h.Name += "/"
- case fm&os.ModeSymlink != 0:
- h.Typeflag = TypeSymlink
- h.Mode |= c_ISLNK
- h.Linkname = link
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- h.Mode |= c_ISCHR
- h.Typeflag = TypeChar
- } else {
- h.Mode |= c_ISBLK
- h.Typeflag = TypeBlock
- }
- case fm&os.ModeNamedPipe != 0:
- h.Typeflag = TypeFifo
- h.Mode |= c_ISFIFO
- case fm&os.ModeSocket != 0:
- h.Mode |= c_ISSOCK
- default:
- return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
- }
- if fm&os.ModeSetuid != 0 {
- h.Mode |= c_ISUID
- }
- if fm&os.ModeSetgid != 0 {
- h.Mode |= c_ISGID
- }
- if fm&os.ModeSticky != 0 {
- h.Mode |= c_ISVTX
- }
- // If possible, populate additional fields from OS-specific
- // FileInfo fields.
- if sys, ok := fi.Sys().(*Header); ok {
- // This FileInfo came from a Header (not the OS). Use the
- // original Header to populate all remaining fields.
- h.Uid = sys.Uid
- h.Gid = sys.Gid
- h.Uname = sys.Uname
- h.Gname = sys.Gname
- h.AccessTime = sys.AccessTime
- h.ChangeTime = sys.ChangeTime
- if sys.Xattrs != nil {
- h.Xattrs = make(map[string]string)
- for k, v := range sys.Xattrs {
- h.Xattrs[k] = v
- }
- }
- if sys.Typeflag == TypeLink {
- // hard link
- h.Typeflag = TypeLink
- h.Size = 0
- h.Linkname = sys.Linkname
- }
- }
- if sysStat != nil {
- return h, sysStat(fi, h)
- }
- return h, nil
-}
-
-var zeroBlock = make([]byte, blockSize)
-
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
-// We compute and return both.
-func checksum(header []byte) (unsigned int64, signed int64) {
- for i := 0; i < len(header); i++ {
- if i == 148 {
- // The chksum field (header[148:156]) is special: it should be treated as space bytes.
- unsigned += ' ' * 8
- signed += ' ' * 8
- i += 7
- continue
- }
- unsigned += int64(header[i])
- signed += int64(int8(header[i]))
- }
- return
-}
-
-type slicer []byte
-
-func (sp *slicer) next(n int) (b []byte) {
- s := *sp
- b, *sp = s[0:n], s[n:]
- return
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if c >= 0x80 {
- return false
- }
- }
- return true
-}
-
-func toASCII(s string) string {
- if isASCII(s) {
- return s
- }
- var buf bytes.Buffer
- for _, c := range s {
- if c < 0x80 {
- buf.WriteByte(byte(c))
- }
- }
- return buf.String()
-}
-
-// isHeaderOnlyType checks if the given type flag is of the type that has no
-// data section even if a size is specified.
-func isHeaderOnlyType(flag byte) bool {
- switch flag {
- case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
- return true
- default:
- return false
- }
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
deleted file mode 100644
index e210c618a..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
+++ /dev/null
@@ -1,1002 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - pax extensions
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "math"
- "os"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-const maxNanoSecondIntSize = 9
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
-type Reader struct {
- r io.Reader
- err error
- pad int64 // amount of padding (ignored) after current file entry
- curr numBytesReader // reader for current file entry
- hdrBuff [blockSize]byte // buffer to use in readHeader
-}
-
-type parser struct {
- err error // Last error seen
-}
-
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
- io.Reader
- numBytes() int64
-}
-
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
- r io.Reader // underlying reader
- nb int64 // number of unread bytes for current file entry
-}
-
-// A sparseFileReader is a numBytesReader for reading sparse file data from a
-// tar archive.
-type sparseFileReader struct {
- rfr numBytesReader // Reads the sparse-encoded file data
- sp []sparseEntry // The sparse map for the file
- pos int64 // Keeps track of file position
- total int64 // Total size of the file
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-//
-// Sparse files are represented using a series of sparseEntrys.
-// Despite the name, a sparseEntry represents an actual data fragment that
-// references data found in the underlying archive stream. All regions not
-// covered by a sparseEntry are logically filled with zeros.
-//
-// For example, if the underlying raw file contains the 10-byte data:
-// var compactData = "abcdefgh"
-//
-// And the sparse map has the following entries:
-// var sp = []sparseEntry{
-// {offset: 2, numBytes: 5} // Data fragment for [2..7]
-// {offset: 18, numBytes: 3} // Data fragment for [18..21]
-// }
-//
-// Then the content of the resulting sparse file with a "real" size of 25 is:
-// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type sparseEntry struct {
- offset int64 // Starting position of the fragment
- numBytes int64 // Length of the fragment
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
- paxGNUSparseOffset = "GNU.sparse.offset"
- paxGNUSparseNumBytes = "GNU.sparse.numbytes"
- paxGNUSparseMap = "GNU.sparse.map"
- paxGNUSparseName = "GNU.sparse.name"
- paxGNUSparseMajor = "GNU.sparse.major"
- paxGNUSparseMinor = "GNU.sparse.minor"
- paxGNUSparseSize = "GNU.sparse.size"
- paxGNUSparseRealSize = "GNU.sparse.realsize"
-)
-
-// Keywords for old GNU sparse headers
-const (
- oldGNUSparseMainHeaderOffset = 386
- oldGNUSparseMainHeaderIsExtendedOffset = 482
- oldGNUSparseMainHeaderNumEntries = 4
- oldGNUSparseExtendedHeaderIsExtendedOffset = 504
- oldGNUSparseExtendedHeaderNumEntries = 21
- oldGNUSparseOffsetSize = 12
- oldGNUSparseNumBytesSize = 12
-)
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
-
-// Next advances to the next entry in the tar archive.
-//
-// io.EOF is returned at the end of the input.
-func (tr *Reader) Next() (*Header, error) {
- if tr.err != nil {
- return nil, tr.err
- }
-
- var hdr *Header
- var extHdrs map[string]string
-
- // Externally, Next iterates through the tar archive as if it is a series of
- // files. Internally, the tar format often uses fake "files" to add meta
- // data that describes the next file. These meta data "files" should not
- // normally be visible to the outside. As such, this loop iterates through
- // one or more "header files" until it finds a "normal file".
-loop:
- for {
- tr.err = tr.skipUnread()
- if tr.err != nil {
- return nil, tr.err
- }
-
- hdr = tr.readHeader()
- if tr.err != nil {
- return nil, tr.err
- }
-
- // Check for PAX/GNU special headers and files.
- switch hdr.Typeflag {
- case TypeXHeader:
- extHdrs, tr.err = parsePAX(tr)
- if tr.err != nil {
- return nil, tr.err
- }
- continue loop // This is a meta header affecting the next header
- case TypeGNULongName, TypeGNULongLink:
- var realname []byte
- realname, tr.err = ioutil.ReadAll(tr)
- if tr.err != nil {
- return nil, tr.err
- }
-
- // Convert GNU extensions to use PAX headers.
- if extHdrs == nil {
- extHdrs = make(map[string]string)
- }
- var p parser
- switch hdr.Typeflag {
- case TypeGNULongName:
- extHdrs[paxPath] = p.parseString(realname)
- case TypeGNULongLink:
- extHdrs[paxLinkpath] = p.parseString(realname)
- }
- if p.err != nil {
- tr.err = p.err
- return nil, tr.err
- }
- continue loop // This is a meta header affecting the next header
- default:
- mergePAX(hdr, extHdrs)
-
- // Check for a PAX format sparse file
- sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
- if err != nil {
- tr.err = err
- return nil, err
- }
- if sp != nil {
- // Current file is a PAX format GNU sparse file.
- // Set the current file reader to a sparse file reader.
- tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
- if tr.err != nil {
- return nil, tr.err
- }
- }
- break loop // This is a file, so stop
- }
- }
- return hdr, nil
-}
-
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
- var sparseFormat string
-
- // Check for sparse format indicators
- major, majorOk := headers[paxGNUSparseMajor]
- minor, minorOk := headers[paxGNUSparseMinor]
- sparseName, sparseNameOk := headers[paxGNUSparseName]
- _, sparseMapOk := headers[paxGNUSparseMap]
- sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
- sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
- // Identify which, if any, sparse format applies from which PAX headers are set
- if majorOk && minorOk {
- sparseFormat = major + "." + minor
- } else if sparseNameOk && sparseMapOk {
- sparseFormat = "0.1"
- } else if sparseSizeOk {
- sparseFormat = "0.0"
- } else {
- // Not a PAX format GNU sparse file.
- return nil, nil
- }
-
- // Check for unknown sparse format
- if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
- return nil, nil
- }
-
- // Update hdr from GNU sparse PAX headers
- if sparseNameOk {
- hdr.Name = sparseName
- }
- if sparseSizeOk {
- realSize, err := strconv.ParseInt(sparseSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- } else if sparseRealSizeOk {
- realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- }
-
- // Set up the sparse map, according to the particular sparse format in use
- var sp []sparseEntry
- var err error
- switch sparseFormat {
- case "0.0", "0.1":
- sp, err = readGNUSparseMap0x1(headers)
- case "1.0":
- sp, err = readGNUSparseMap1x0(tr.curr)
- }
- return sp, err
-}
-
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) error {
- for k, v := range headers {
- switch k {
- case paxPath:
- hdr.Name = v
- case paxLinkpath:
- hdr.Linkname = v
- case paxGname:
- hdr.Gname = v
- case paxUname:
- hdr.Uname = v
- case paxUid:
- uid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Uid = int(uid)
- case paxGid:
- gid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Gid = int(gid)
- case paxAtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.AccessTime = t
- case paxMtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ModTime = t
- case paxCtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ChangeTime = t
- case paxCreationTime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.CreationTime = t
- case paxSize:
- size, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Size = int64(size)
- default:
- if strings.HasPrefix(k, paxXattr) {
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
- }
- hdr.Xattrs[k[len(paxXattr):]] = v
- } else if strings.HasPrefix(k, paxWindows) {
- if hdr.Winheaders == nil {
- hdr.Winheaders = make(map[string]string)
- }
- hdr.Winheaders[k[len(paxWindows):]] = v
- }
- }
- }
- return nil
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in
-// the PAX specification.
-func parsePAXTime(t string) (time.Time, error) {
- buf := []byte(t)
- pos := bytes.IndexByte(buf, '.')
- var seconds, nanoseconds int64
- var err error
- if pos == -1 {
- seconds, err = strconv.ParseInt(t, 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- } else {
- seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- nano_buf := string(buf[pos+1:])
- // Pad as needed before converting to a decimal.
- // For example .030 -> .030000000 -> 30000000 nanoseconds
- if len(nano_buf) < maxNanoSecondIntSize {
- // Right pad
- nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
- } else if len(nano_buf) > maxNanoSecondIntSize {
- // Right truncate
- nano_buf = nano_buf[:maxNanoSecondIntSize]
- }
- nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- }
- ts := time.Unix(seconds, nanoseconds)
- return ts, nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned
-func parsePAX(r io.Reader) (map[string]string, error) {
- buf, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
- sbuf := string(buf)
-
- // For GNU PAX sparse format 0.0 support.
- // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
- var sparseMap bytes.Buffer
-
- headers := make(map[string]string)
- // Each record is constructed as
- // "%d %s=%s\n", length, keyword, value
- for len(sbuf) > 0 {
- key, value, residual, err := parsePAXRecord(sbuf)
- if err != nil {
- return nil, ErrHeader
- }
- sbuf = residual
-
- keyStr := string(key)
- if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
- // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
- sparseMap.WriteString(value)
- sparseMap.Write([]byte{','})
- } else {
- // Normal key. Set the value in the headers map.
- headers[keyStr] = string(value)
- }
- }
- if sparseMap.Len() != 0 {
- // Add sparse info to headers, chopping off the extra comma
- sparseMap.Truncate(sparseMap.Len() - 1)
- headers[paxGNUSparseMap] = sparseMap.String()
- }
- return headers, nil
-}
-
-// parsePAXRecord parses the input PAX record string into a key-value pair.
-// If parsing is successful, it will slice off the currently read record and
-// return the remainder as r.
-//
-// A PAX record is of the following form:
-// "%d %s=%s\n" % (size, key, value)
-func parsePAXRecord(s string) (k, v, r string, err error) {
- // The size field ends at the first space.
- sp := strings.IndexByte(s, ' ')
- if sp == -1 {
- return "", "", s, ErrHeader
- }
-
- // Parse the first token as a decimal integer.
- n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
- if perr != nil || n < 5 || int64(len(s)) < n {
- return "", "", s, ErrHeader
- }
-
- // Extract everything between the space and the final newline.
- rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
- if nl != "\n" {
- return "", "", s, ErrHeader
- }
-
- // The first equals separates the key from the value.
- eq := strings.IndexByte(rec, '=')
- if eq == -1 {
- return "", "", s, ErrHeader
- }
- return rec[:eq], rec[eq+1:], rem, nil
-}
-
-// parseString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func (*parser) parseString(b []byte) string {
- n := 0
- for n < len(b) && b[n] != 0 {
- n++
- }
- return string(b[0:n])
-}
-
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
- // Check for base-256 (binary) format first.
- // If the first bit is set, then all following bits constitute a two's
- // complement encoded number in big-endian byte order.
- if len(b) > 0 && b[0]&0x80 != 0 {
- // Handling negative numbers relies on the following identity:
- // -a-1 == ^a
- //
- // If the number is negative, we use an inversion mask to invert the
- // data bytes and treat the value as an unsigned number.
- var inv byte // 0x00 if positive or zero, 0xff if negative
- if b[0]&0x40 != 0 {
- inv = 0xff
- }
-
- var x uint64
- for i, c := range b {
- c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
- if i == 0 {
- c &= 0x7f // Ignore signal bit in first byte
- }
- if (x >> 56) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- x = x<<8 | uint64(c)
- }
- if (x >> 63) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- if inv == 0xff {
- return ^int64(x)
- }
- return int64(x)
- }
-
- // Normal case is base-8 (octal) format.
- return p.parseOctal(b)
-}
-
-func (p *parser) parseOctal(b []byte) int64 {
- // Because unused fields are filled with NULs, we need
- // to skip leading NULs. Fields may also be padded with
- // spaces or NULs.
- // So we remove leading and trailing NULs and spaces to
- // be sure.
- b = bytes.Trim(b, " \x00")
-
- if len(b) == 0 {
- return 0
- }
- x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
- if perr != nil {
- p.err = ErrHeader
- }
- return int64(x)
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any
-// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
-// encountered in the data portion; it is okay to hit io.EOF in the padding.
-//
-// Note that this function still works properly even when sparse files are being
-// used since numBytes returns the bytes remaining in the underlying io.Reader.
-func (tr *Reader) skipUnread() error {
- dataSkip := tr.numBytes() // Number of data bytes to skip
- totalSkip := dataSkip + tr.pad // Total number of bytes to skip
- tr.curr, tr.pad = nil, 0
-
- // If possible, Seek to the last byte before the end of the data section.
- // Do this because Seek is often lazy about reporting errors; this will mask
- // the fact that the tar stream may be truncated. We can rely on the
- // io.CopyN done shortly afterwards to trigger any IO errors.
- var seekSkipped int64 // Number of bytes skipped via Seek
- if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
- // Not all io.Seeker can actually Seek. For example, os.Stdin implements
- // io.Seeker, but calling Seek always returns an error and performs
- // no action. Thus, we try an innocent seek to the current position
- // to see if Seek is really supported.
- pos1, err := sr.Seek(0, os.SEEK_CUR)
- if err == nil {
- // Seek seems supported, so perform the real Seek.
- pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
- if err != nil {
- tr.err = err
- return tr.err
- }
- seekSkipped = pos2 - pos1
- }
- }
-
- var copySkipped int64 // Number of bytes skipped via CopyN
- copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
- if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
- tr.err = io.ErrUnexpectedEOF
- }
- return tr.err
-}
-
-func (tr *Reader) verifyChecksum(header []byte) bool {
- if tr.err != nil {
- return false
- }
-
- var p parser
- given := p.parseOctal(header[148:156])
- unsigned, signed := checksum(header)
- return p.err == nil && (given == unsigned || given == signed)
-}
-
-// readHeader reads the next block header and assumes that the underlying reader
-// is already aligned to a block boundary.
-//
-// The err will be set to io.EOF only when one of the following occurs:
-// * Exactly 0 bytes are read and EOF is hit.
-// * Exactly 1 block of zeros is read and EOF is hit.
-// * At least 2 blocks of zeros are read.
-func (tr *Reader) readHeader() *Header {
- header := tr.hdrBuff[:]
- copy(header, zeroBlock)
-
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil // io.EOF is okay here
- }
-
- // Two blocks of zero bytes marks the end of the archive.
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil // io.EOF is okay here
- }
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- tr.err = io.EOF
- } else {
- tr.err = ErrHeader // zero block and then non-zero block
- }
- return nil
- }
-
- if !tr.verifyChecksum(header) {
- tr.err = ErrHeader
- return nil
- }
-
- // Unpack
- var p parser
- hdr := new(Header)
- s := slicer(header)
-
- hdr.Name = p.parseString(s.next(100))
- hdr.Mode = p.parseNumeric(s.next(8))
- hdr.Uid = int(p.parseNumeric(s.next(8)))
- hdr.Gid = int(p.parseNumeric(s.next(8)))
- hdr.Size = p.parseNumeric(s.next(12))
- hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- s.next(8) // chksum
- hdr.Typeflag = s.next(1)[0]
- hdr.Linkname = p.parseString(s.next(100))
-
- // The remainder of the header depends on the value of magic.
- // The original (v7) version of tar had no explicit magic field,
- // so its magic bytes, like the rest of the block, are NULs.
- magic := string(s.next(8)) // contains version field as well.
- var format string
- switch {
- case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
- if string(header[508:512]) == "tar\x00" {
- format = "star"
- } else {
- format = "posix"
- }
- case magic == "ustar \x00": // old GNU tar
- format = "gnu"
- }
-
- switch format {
- case "posix", "gnu", "star":
- hdr.Uname = p.parseString(s.next(32))
- hdr.Gname = p.parseString(s.next(32))
- devmajor := s.next(8)
- devminor := s.next(8)
- if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
- hdr.Devmajor = p.parseNumeric(devmajor)
- hdr.Devminor = p.parseNumeric(devminor)
- }
- var prefix string
- switch format {
- case "posix", "gnu":
- prefix = p.parseString(s.next(155))
- case "star":
- prefix = p.parseString(s.next(131))
- hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- }
- if len(prefix) > 0 {
- hdr.Name = prefix + "/" + hdr.Name
- }
- }
-
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- nb := hdr.Size
- if isHeaderOnlyType(hdr.Typeflag) {
- nb = 0
- }
- if nb < 0 {
- tr.err = ErrHeader
- return nil
- }
-
- // Set the current file reader.
- tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
- tr.curr = &regFileReader{r: tr.r, nb: nb}
-
- // Check for old GNU sparse format entry.
- if hdr.Typeflag == TypeGNUSparse {
- // Get the real size of the file.
- hdr.Size = p.parseNumeric(header[483:495])
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- // Read the sparse map.
- sp := tr.readOldGNUSparseMap(header)
- if tr.err != nil {
- return nil
- }
-
- // Current file is a GNU sparse file. Update the current file reader.
- tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
- if tr.err != nil {
- return nil
- }
- }
-
- return hdr
-}
-
-// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
-// then one or more extension headers are used to store the rest of the sparse map.
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
- var p parser
- isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
- spCap := oldGNUSparseMainHeaderNumEntries
- if isExtended {
- spCap += oldGNUSparseExtendedHeaderNumEntries
- }
- sp := make([]sparseEntry, 0, spCap)
- s := slicer(header[oldGNUSparseMainHeaderOffset:])
-
- // Read the four entries from the main tar header
- for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
- offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
- numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
- if p.err != nil {
- tr.err = p.err
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- for isExtended {
- // There are more entries. Read an extension header and parse its entries.
- sparseHeader := make([]byte, blockSize)
- if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
- return nil
- }
- isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
- s = slicer(sparseHeader)
- for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
- offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
- numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
- if p.err != nil {
- tr.err = p.err
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- }
- return sp
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
-// version 1.0. The format of the sparse map consists of a series of
-// newline-terminated numeric fields. The first field is the number of entries
-// and is always present. Following this are the entries, consisting of two
-// fields (offset, numBytes). This function must stop reading at the end
-// boundary of the block containing the last newline.
-//
-// Note that the GNU manual says that numeric values should be encoded in octal
-// format. However, the GNU tar utility itself outputs these values in decimal.
-// As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
- var cntNewline int64
- var buf bytes.Buffer
- var blk = make([]byte, blockSize)
-
- // feedTokens copies data in numBlock chunks from r into buf until there are
- // at least cnt newlines in buf. It will not read more blocks than needed.
- var feedTokens = func(cnt int64) error {
- for cntNewline < cnt {
- if _, err := io.ReadFull(r, blk); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return err
- }
- buf.Write(blk)
- for _, c := range blk {
- if c == '\n' {
- cntNewline++
- }
- }
- }
- return nil
- }
-
- // nextToken gets the next token delimited by a newline. This assumes that
- // at least one newline exists in the buffer.
- var nextToken = func() string {
- cntNewline--
- tok, _ := buf.ReadString('\n')
- return tok[:len(tok)-1] // Cut off newline
- }
-
- // Parse for the number of entries.
- // Use integer overflow resistant math to check this.
- if err := feedTokens(1); err != nil {
- return nil, err
- }
- numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // Parse for all member entries.
- // numEntries is trusted after this since a potential attacker must have
- // committed resources proportional to what this library used.
- if err := feedTokens(2 * numEntries); err != nil {
- return nil, err
- }
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- return sp, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
-// version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
- // Get number of entries.
- // Use integer overflow resistant math to check this.
- numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
- numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // There should be two numbers in sparseMap for each entry.
- sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
- if int64(len(sparseMap)) != 2*numEntries {
- return nil, ErrHeader
- }
-
- // Loop through the entries in the sparse map.
- // numEntries is trusted now.
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- return sp, nil
-}
-
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
- if tr.curr == nil {
- // No current file, so no bytes
- return 0
- }
- return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
-//
-// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
-// the Header.Size claims.
-func (tr *Reader) Read(b []byte) (n int, err error) {
- if tr.err != nil {
- return 0, tr.err
- }
- if tr.curr == nil {
- return 0, io.EOF
- }
-
- n, err = tr.curr.Read(b)
- if err != nil && err != io.EOF {
- tr.err = err
- }
- return
-}
-
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
- if rfr.nb == 0 {
- // file consumed
- return 0, io.EOF
- }
- if int64(len(b)) > rfr.nb {
- b = b[0:rfr.nb]
- }
- n, err = rfr.r.Read(b)
- rfr.nb -= int64(n)
-
- if err == io.EOF && rfr.nb > 0 {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
- return rfr.nb
-}
-
-// newSparseFileReader creates a new sparseFileReader, but validates all of the
-// sparse entries before doing so.
-func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
- if total < 0 {
- return nil, ErrHeader // Total size cannot be negative
- }
-
- // Validate all sparse entries. These are the same checks as performed by
- // the BSD tar utility.
- for i, s := range sp {
- switch {
- case s.offset < 0 || s.numBytes < 0:
- return nil, ErrHeader // Negative values are never okay
- case s.offset > math.MaxInt64-s.numBytes:
- return nil, ErrHeader // Integer overflow with large length
- case s.offset+s.numBytes > total:
- return nil, ErrHeader // Region extends beyond the "real" size
- case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
- return nil, ErrHeader // Regions can't overlap and must be in order
- }
- }
- return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
-}
-
-// readHole reads a sparse hole ending at endOffset.
-func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
- n64 := endOffset - sfr.pos
- if n64 > int64(len(b)) {
- n64 = int64(len(b))
- }
- n := int(n64)
- for i := 0; i < n; i++ {
- b[i] = 0
- }
- sfr.pos += n64
- return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
- // Skip past all empty fragments.
- for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
- sfr.sp = sfr.sp[1:]
- }
-
- // If there are no more fragments, then it is possible that there
- // is one last sparse hole.
- if len(sfr.sp) == 0 {
- // This behavior matches the BSD tar utility.
- // However, GNU tar stops returning data even if sfr.total is unmet.
- if sfr.pos < sfr.total {
- return sfr.readHole(b, sfr.total), nil
- }
- return 0, io.EOF
- }
-
- // In front of a data fragment, so read a hole.
- if sfr.pos < sfr.sp[0].offset {
- return sfr.readHole(b, sfr.sp[0].offset), nil
- }
-
- // In a data fragment, so read from it.
- // This math is overflow free since we verify that offset and numBytes can
- // be safely added when creating the sparseFileReader.
- endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
- bytesLeft := endPos - sfr.pos // Bytes left in fragment
- if int64(len(b)) > bytesLeft {
- b = b[:bytesLeft]
- }
-
- n, err = sfr.rfr.Read(b)
- sfr.pos += int64(n)
- if err == io.EOF {
- if sfr.pos < endPos {
- err = io.ErrUnexpectedEOF // There was supposed to be more data
- } else if sfr.pos < sfr.total {
- err = nil // There is still an implicit sparse hole at the end
- }
- }
-
- if sfr.pos == endPos {
- sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
- }
- return n, err
-}
-
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
- return sfr.rfr.numBytes()
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
deleted file mode 100644
index cf9cc79c5..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux dragonfly openbsd solaris
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctim.Unix())
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
deleted file mode 100644
index 6f17dbe30..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd netbsd
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctimespec.Unix())
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
deleted file mode 100644
index cb843db4c..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris
-
-package tar
-
-import (
- "os"
- "syscall"
-)
-
-func init() {
- sysStat = statUnix
-}
-
-func statUnix(fi os.FileInfo, h *Header) error {
- sys, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
- }
- h.Uid = int(sys.Uid)
- h.Gid = int(sys.Gid)
- // TODO(bradfitz): populate username & group. os/user
- // doesn't cache LookupId lookups, and lacks group
- // lookup functions.
- h.AccessTime = statAtime(sys)
- h.ChangeTime = statCtime(sys)
- // TODO(bradfitz): major/minor device numbers?
- return nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
deleted file mode 100644
index 30d7e606d..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "path"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrWriteTooLong = errors.New("archive/tar: write too long")
- ErrFieldTooLong = errors.New("archive/tar: header field too long")
- ErrWriteAfterClose = errors.New("archive/tar: write after close")
- errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
-type Writer struct {
- w io.Writer
- err error
- nb int64 // number of unwritten bytes for current file entry
- pad int64 // amount of padding to write after current file entry
- closed bool
- usedBinary bool // whether the binary numeric field extension was used
- preferPax bool // use pax header instead of binary numeric header
- hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
- paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
-}
-
-type formatter struct {
- err error // Last error seen
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
-
-// Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() error {
- if tw.nb > 0 {
- tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
- return tw.err
- }
-
- n := tw.nb + tw.pad
- for n > 0 && tw.err == nil {
- nr := n
- if nr > blockSize {
- nr = blockSize
- }
- var nw int
- nw, tw.err = tw.w.Write(zeroBlock[0:nr])
- n -= int64(nw)
- }
- tw.nb = 0
- tw.pad = 0
- return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-func (f *formatter) formatString(b []byte, s string) {
- if len(s) > len(b) {
- f.err = ErrFieldTooLong
- return
- }
- ascii := toASCII(s)
- copy(b, ascii)
- if len(ascii) < len(b) {
- b[len(ascii)] = 0
- }
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (f *formatter) formatOctal(b []byte, x int64) {
- s := strconv.FormatInt(x, 8)
- // leading zeros, but leave room for a NUL.
- for len(s)+1 < len(b) {
- s = "0" + s
- }
- f.formatString(b, s)
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode; which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
- var binBits = uint(n-1) * 8
- return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// Write x into b, as binary (GNUtar/star extension).
-func (f *formatter) formatNumeric(b []byte, x int64) {
- if fitsInBase256(len(b), x) {
- for i := len(b) - 1; i >= 0; i-- {
- b[i] = byte(x)
- x >>= 8
- }
- b[0] |= 0x80 // Highest bit indicates binary format
- return
- }
-
- f.formatOctal(b, 0) // Last resort, just write zero
- f.err = ErrFieldTooLong
-}
-
-var (
- minTime = time.Unix(0, 0)
- // There is room for 11 octal digits (33 bits) of mtime.
- maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
- if tw.closed {
- return ErrWriteAfterClose
- }
- if tw.err == nil {
- tw.Flush()
- }
- if tw.err != nil {
- return tw.err
- }
-
- // a map to hold pax header records, if any are needed
- paxHeaders := make(map[string]string)
-
- // TODO(shanemhansen): we might want to use PAX headers for
- // subsecond time resolution, but for now let's just capture
- // too long fields or non ascii characters
-
- var f formatter
- var header []byte
-
- // We need to select which scratch buffer to use carefully,
- // since this method is called recursively to write PAX headers.
- // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
- // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
- // already being used by the non-recursive call, so we must use paxHdrBuff.
- header = tw.hdrBuff[:]
- if !allowPax {
- header = tw.paxHdrBuff[:]
- }
- copy(header, zeroBlock)
- s := slicer(header)
-
- // Wrappers around formatter that automatically sets paxHeaders if the
- // argument extends beyond the capacity of the input byte slice.
- var formatString = func(b []byte, s string, paxKeyword string) {
- needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
- if needsPaxHeader {
- paxHeaders[paxKeyword] = s
- return
- }
- f.formatString(b, s)
- }
- var formatNumeric = func(b []byte, x int64, paxKeyword string) {
- // Try octal first.
- s := strconv.FormatInt(x, 8)
- if len(s) < len(b) {
- f.formatOctal(b, x)
- return
- }
-
- // If it is too long for octal, and PAX is preferred, use a PAX header.
- if paxKeyword != paxNone && tw.preferPax {
- f.formatOctal(b, 0)
- s := strconv.FormatInt(x, 10)
- paxHeaders[paxKeyword] = s
- return
- }
-
- tw.usedBinary = true
- f.formatNumeric(b, x)
- }
- var formatTime = func(b []byte, t time.Time, paxKeyword string) {
- var unixTime int64
- if !t.Before(minTime) && !t.After(maxTime) {
- unixTime = t.Unix()
- }
- formatNumeric(b, unixTime, paxNone)
-
- // Write a PAX header if the time didn't fit precisely.
- if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
- paxHeaders[paxKeyword] = formatPAXTime(t)
- }
- }
-
- // keep a reference to the filename so we can overwrite it later if we detect that we can use ustar longnames instead of pax
- pathHeaderBytes := s.next(fileNameSize)
-
- formatString(pathHeaderBytes, hdr.Name, paxPath)
-
- f.formatOctal(s.next(8), hdr.Mode) // 100:108
- formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
- formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
- formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
- formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
- s.next(8) // chksum (148:156)
- s.next(1)[0] = hdr.Typeflag // 156:157
-
- formatString(s.next(100), hdr.Linkname, paxLinkpath)
-
- copy(s.next(8), []byte("ustar\x0000")) // 257:265
- formatString(s.next(32), hdr.Uname, paxUname) // 265:297
- formatString(s.next(32), hdr.Gname, paxGname) // 297:329
- formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
- formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
-
- // keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar longnames instead of pax
- prefixHeaderBytes := s.next(155)
- formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
-
- // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
- if tw.usedBinary {
- copy(header[257:265], []byte("ustar \x00"))
- }
-
- _, paxPathUsed := paxHeaders[paxPath]
- // try to use a ustar header when only the name is too long
- if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
- prefix, suffix, ok := splitUSTARPath(hdr.Name)
- if ok {
- // Since we can encode in USTAR format, disable PAX header.
- delete(paxHeaders, paxPath)
-
- // Update the path fields
- formatString(pathHeaderBytes, suffix, paxNone)
- formatString(prefixHeaderBytes, prefix, paxNone)
- }
- }
-
- // The chksum field is terminated by a NUL and a space.
- // This is different from the other octal fields.
- chksum, _ := checksum(header)
- f.formatOctal(header[148:155], chksum) // Never fails
- header[155] = ' '
-
- // Check if there were any formatting errors.
- if f.err != nil {
- tw.err = f.err
- return tw.err
- }
-
- if allowPax {
- if !hdr.AccessTime.IsZero() {
- paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
- }
- if !hdr.ChangeTime.IsZero() {
- paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
- }
- if !hdr.CreationTime.IsZero() {
- paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
- }
- for k, v := range hdr.Xattrs {
- paxHeaders[paxXattr+k] = v
- }
- for k, v := range hdr.Winheaders {
- paxHeaders[paxWindows+k] = v
- }
- }
-
- if len(paxHeaders) > 0 {
- if !allowPax {
- return errInvalidHeader
- }
- if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
- return err
- }
- }
- tw.nb = int64(hdr.Size)
- tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
-
- _, tw.err = tw.w.Write(header)
- return tw.err
-}
-
-func formatPAXTime(t time.Time) string {
- sec := t.Unix()
- usec := t.Nanosecond()
- s := strconv.FormatInt(sec, 10)
- if usec != 0 {
- s = fmt.Sprintf("%s.%09d", s, usec)
- }
- return s
-}
-
-// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
-// If the path is not splittable, then it will return ("", "", false).
-func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
- length := len(name)
- if length <= fileNameSize || !isASCII(name) {
- return "", "", false
- } else if length > fileNamePrefixSize+1 {
- length = fileNamePrefixSize + 1
- } else if name[length-1] == '/' {
- length--
- }
-
- i := strings.LastIndex(name[:length], "/")
- nlen := len(name) - i - 1 // nlen is length of suffix
- plen := i // plen is length of prefix
- if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
- return "", "", false
- }
- return name[:i], name[i+1:], true
-}
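As an editorial aside (not part of this diff), here is a minimal standalone sketch of how the split rule above plays out for one concrete path that is too long for the name field on its own but splits cleanly at its last '/'; the 100-byte name and 155-byte prefix limits are the USTAR field sizes used by the removed writer.

    // Illustrative only: restates the splitUSTARPath rule for one example path.
    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        name := strings.Repeat("a", 60) + "/" + strings.Repeat("b", 80) // 141 ASCII bytes
        i := strings.LastIndex(name, "/")
        // 60 bytes go to the prefix field (max 155), 80 bytes to the name field (max 100).
        fmt.Println(len(name[:i]), len(name[i+1:]))
    }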
-
-// writePAXHeader writes an extended pax header to the archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
- // Prepare extended header
- ext := new(Header)
- ext.Typeflag = TypeXHeader
- // Setting ModTime is required for reader parsing to
- // succeed, and seems harmless enough.
- ext.ModTime = hdr.ModTime
- // The spec asks that we namespace our pseudo files
- // with the current pid. However, this results in differing outputs
- // for identical inputs. As such, the constant 0 is now used instead.
- // golang.org/issue/12358
- dir, file := path.Split(hdr.Name)
- fullName := path.Join(dir, "PaxHeaders.0", file)
-
- ascii := toASCII(fullName)
- if len(ascii) > 100 {
- ascii = ascii[:100]
- }
- ext.Name = ascii
- // Construct the body
- var buf bytes.Buffer
-
- // Keys are sorted before writing to body to allow deterministic output.
- var keys []string
- for k := range paxHeaders {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
- }
-
- ext.Size = int64(len(buf.Bytes()))
- if err := tw.writeHeader(ext, false); err != nil {
- return err
- }
- if _, err := tw.Write(buf.Bytes()); err != nil {
- return err
- }
- if err := tw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-// formatPAXRecord formats a single PAX record, prefixing it with the
-// appropriate length.
-func formatPAXRecord(k, v string) string {
- const padding = 3 // Extra padding for ' ', '=', and '\n'
- size := len(k) + len(v) + padding
- size += len(strconv.Itoa(size))
- record := fmt.Sprintf("%d %s=%s\n", size, k, v)
-
- // Final adjustment if adding size field increased the record size.
- if len(record) != size {
- size = len(record)
- record = fmt.Sprintf("%d %s=%s\n", size, k, v)
- }
- return record
-}
-
-// Write writes to the current entry in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
- if tw.closed {
- err = ErrWriteAfterClose
- return
- }
- overwrite := false
- if int64(len(b)) > tw.nb {
- b = b[0:tw.nb]
- overwrite = true
- }
- n, err = tw.w.Write(b)
- tw.nb -= int64(n)
- if err == nil && overwrite {
- err = ErrWriteTooLong
- return
- }
- tw.err = err
- return
-}
-
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
- if tw.err != nil || tw.closed {
- return tw.err
- }
- tw.Flush()
- tw.closed = true
- if tw.err != nil {
- return tw.err
- }
-
- // trailer: two zero blocks
- for i := 0; i < 2; i++ {
- _, tw.err = tw.w.Write(zeroBlock)
- if tw.err != nil {
- break
- }
- }
- return tw.err
-}
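For reference, a minimal stdlib-only sketch (not part of this diff) of the length-prefixed record layout that the removed formatPAXRecord produces, including the quirk that the size field counts its own digits.

    // Sketch: framing one PAX record as "%d %s=%s\n" with a self-counting size field.
    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        k, v := "mtime", "1350244992.023960108"
        size := len(k) + len(v) + 3 // ' ', '=', '\n'
        size += len(strconv.Itoa(size))
        record := fmt.Sprintf("%d %s=%s\n", size, k, v)
        if len(record) != size { // adding the size digits can grow the record by one byte
            size = len(record)
            record = fmt.Sprintf("%d %s=%s\n", size, k, v)
        }
        fmt.Printf("%q\n", record) // "30 mtime=1350244992.023960108\n"
    }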
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
new file mode 100644
index 000000000..341609663
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
@@ -0,0 +1,68 @@
+package backuptar
+
+import (
+ "archive/tar"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
+// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
+// Idea taken from containerd which did the same thing.
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+ const maxNanoSecondDigits = 9
+
+ // Split string into seconds and sub-seconds parts.
+ ss, sn := s, ""
+ if pos := strings.IndexByte(s, '.'); pos >= 0 {
+ ss, sn = s[:pos], s[pos+1:]
+ }
+
+ // Parse the seconds.
+ secs, err := strconv.ParseInt(ss, 10, 64)
+ if err != nil {
+ return time.Time{}, tar.ErrHeader
+ }
+ if len(sn) == 0 {
+ return time.Unix(secs, 0), nil // No sub-second values
+ }
+
+ // Parse the nanoseconds.
+ if strings.Trim(sn, "0123456789") != "" {
+ return time.Time{}, tar.ErrHeader
+ }
+ if len(sn) < maxNanoSecondDigits {
+ sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+ } else {
+ sn = sn[:maxNanoSecondDigits] // Right truncate
+ }
+ nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+ if len(ss) > 0 && ss[0] == '-' {
+ return time.Unix(secs, -1*nsecs), nil // Negative correction
+ }
+ return time.Unix(secs, nsecs), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
+func formatPAXTime(ts time.Time) (s string) {
+ secs, nsecs := ts.Unix(), ts.Nanosecond()
+ if nsecs == 0 {
+ return strconv.FormatInt(secs, 10)
+ }
+
+ // If seconds is negative, then perform correction.
+ sign := ""
+ if secs < 0 {
+ sign = "-" // Remember sign
+ secs = -(secs + 1) // Add a second to secs
+ nsecs = -(nsecs - 1e9) // Take that second away from nsecs
+ }
+ return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
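A minimal stdlib-only sketch (it does not call the unexported helpers above) of the sub-second string form formatPAXTime emits for a non-negative timestamp; the negative-seconds correction in the vendored code is not covered here.

    // Sketch: seconds.nanoseconds with trailing zeros trimmed.
    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    func main() {
        ts := time.Unix(1605554852, 500000000)
        s := fmt.Sprintf("%d.%09d", ts.Unix(), ts.Nanosecond())
        fmt.Println(strings.TrimRight(s, "0")) // 1605554852.5
    }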
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
index d6566dbf0..088a43c68 100644
--- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -3,6 +3,7 @@
package backuptar
import (
+ "archive/tar"
"encoding/base64"
"errors"
"fmt"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/Microsoft/go-winio"
- "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
@@ -32,11 +32,13 @@ const (
)
const (
- hdrFileAttributes = "fileattr"
- hdrSecurityDescriptor = "sd"
- hdrRawSecurityDescriptor = "rawsd"
- hdrMountPoint = "mountpoint"
- hdrEaPrefix = "xattr."
+ hdrFileAttributes = "MSWINDOWS.fileattr"
+ hdrSecurityDescriptor = "MSWINDOWS.sd"
+ hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
+ hdrMountPoint = "MSWINDOWS.mountpoint"
+ hdrEaPrefix = "MSWINDOWS.xattr."
+
+ hdrCreationTime = "LIBARCHIVE.creationtime"
)
func writeZeroes(w io.Writer, count int64) error {
@@ -86,16 +88,17 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
- Name: filepath.ToSlash(name),
- Size: size,
- Typeflag: tar.TypeReg,
- ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
- ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
- AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
- CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
- Winheaders: make(map[string]string),
+ Format: tar.FormatPAX,
+ Name: filepath.ToSlash(name),
+ Size: size,
+ Typeflag: tar.TypeReg,
+ ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
+ ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
+ AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
+ PAXRecords: make(map[string]string),
}
- hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+ hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+ hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
@@ -155,7 +158,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
if err != nil {
return err
}
- hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+ hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
@@ -166,7 +169,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
return err
}
if rp.IsMountPoint {
- hdr.Winheaders[hdrMountPoint] = "1"
+ hdr.PAXRecords[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
@@ -183,7 +186,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
- hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
+ hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
@@ -254,6 +257,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
+ Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
@@ -296,9 +300,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
- CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
+ // Default to ModTime; we'll pull hdrCreationTime below if present
+ CreationTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
}
- if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
+ if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
@@ -309,6 +314,13 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
+ if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
+ creationTime, err := parsePAXTime(creationTimeStr)
+ if err != nil {
+ return "", 0, nil, err
+ }
+ fileInfo.CreationTime = syscall.NsecToFiletime(creationTime.UnixNano())
+ }
return
}
@@ -321,13 +333,13 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
- if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
+ if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
- if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
+ if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
@@ -348,7 +360,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
var eas []winio.ExtendedAttribute
- for k, v := range hdr.Winheaders {
+ for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
@@ -380,7 +392,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
if hdr.Typeflag == tar.TypeSymlink {
- _, isMountPoint := hdr.Winheaders[hdrMountPoint]
+ _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
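With the go-winio fork of archive/tar removed, the Windows metadata now travels in standard-library PAX records. A minimal sketch (illustrative values, not part of this changeset) of the kind of header backuptar now hands to archive/tar:

    // Sketch: a PAX-format header carrying the MSWINDOWS./LIBARCHIVE. records
    // named by the constants above; the attribute value is hypothetical.
    package main

    import (
        "archive/tar"
        "log"
        "os"
        "time"
    )

    func main() {
        hdr := &tar.Header{
            Format:   tar.FormatPAX,
            Name:     "Files/example.txt",
            Typeflag: tar.TypeReg,
            ModTime:  time.Now(),
            PAXRecords: map[string]string{
                "MSWINDOWS.fileattr":      "32",                   // hypothetical attribute bits
                "LIBARCHIVE.creationtime": "1605554852.123456789", // formatted as in strconv.go above
            },
        }
        tw := tar.NewWriter(os.Stdout)
        if err := tw.WriteHeader(hdr); err != nil {
            log.Fatal(err)
        }
        tw.Close()
    }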
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index e0a2671f7..f37f387e7 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,38 @@
# Changelog
+## v1.18.0 (2020-11-16)
+ Fix testing error caused by simultaneous merge
+ Vendor in containers/storage v1.24.0
+ short-names aliasing
+ Add --policy flag to buildah pull
+ Stop overwrapping and stuttering
+ copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ Run: don't forcibly disable UTS namespaces in rootless mode
+ test: ensure non-directory in a Dockerfile path is handled correctly
+ Add a few tests for `pull` command
+ Fix buildah config --cmd to handle array
+ build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ Fix NPE when Dockerfile path contains non-directory entries
+ Update buildah bud man page from podman build man page
+ Move declaration of decryption-keys to common cli
+ Run: correctly call copier.Mkdir
+ util: digging UID/GID out of os.FileInfo should work on Unix
+ imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ Verify userns-uid-map and userns-gid-map input
+ Use CPP, CC and flags in dep check scripts
+ Avoid overriding LDFLAGS in Makefile
+ ADD: handle --chown on URLs
+ Update nix pin with `make nixpkgs`
+ (*Builder).Run: MkdirAll: handle EEXIST error
+ copier: try to force loading of nsswitch modules before chroot()
+ fix MkdirAll usage
+ build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ Use osusergo build tag for static build
+ imagebuildah: cache should take image format into account
+ Bump to v1.18.0-dev
+
## v1.17.0 (2020-10-29)
Handle cases where other tools mount/unmount containers
overlay.MountReadOnly: support RO overlay mounts
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 96e8619a8..9ab47e60c 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.18.0-dev"
+ Version = "1.18.0"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index df19d0746..f59d426ae 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,35 @@
+- Changelog for v1.18.0 (2020-11-16)
+ * Fix testing error caused by simultaneous merge
+ * Vendor in containers/storage v1.24.0
+ * short-names aliasing
+ * Add --policy flag to buildah pull
+ * Stop overwrapping and stuttering
+ * copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ * Run: don't forcibly disable UTS namespaces in rootless mode
+ * test: ensure non-directory in a Dockerfile path is handled correctly
+ * Add a few tests for `pull` command
+ * Fix buildah config --cmd to handle array
+ * build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ * Fix NPE when Dockerfile path contains non-directory entries
+ * Update buildah bud man page from podman build man page
+ * Move declaration of decryption-keys to common cli
+ * Run: correctly call copier.Mkdir
+ * util: digging UID/GID out of os.FileInfo should work on Unix
+ * imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ * Verify userns-uid-map and userns-gid-map input
+ * Use CPP, CC and flags in dep check scripts
+ * Avoid overriding LDFLAGS in Makefile
+ * ADD: handle --chown on URLs
+ * Update nix pin with `make nixpkgs`
+ * (*Builder).Run: MkdirAll: handle EEXIST error
+ * copier: try to force loading of nsswitch modules before chroot()
+ * fix MkdirAll usage
+ * build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ * build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ * Use osusergo build tag for static build
+ * imagebuildah: cache should take image format into account
+ * Bump to v1.18.0-dev
+
- Changelog for v1.17.0 (2020-10-29)
* Handle cases where other tools mount/unmount containers
* overlay.MountReadOnly: support RO overlay mounts
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 187e785d3..62c47d6bc 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -1,6 +1,8 @@
package define
-import "fmt"
+import (
+ "fmt"
+)
// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
type PullPolicy int
@@ -39,3 +41,10 @@ func (p PullPolicy) String() string {
}
return fmt.Sprintf("unrecognized policy %d", p)
}
+
+var PolicyMap = map[string]PullPolicy{
+ "missing": PullIfMissing,
+ "always": PullAlways,
+ "never": PullNever,
+ "ifnewer": PullIfNewer,
+}
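A minimal sketch of how a caller might resolve a --policy style string through the new PolicyMap; the actual CLI wiring is outside this diff.

    // Sketch: look up a pull policy by its CLI spelling.
    package main

    import (
        "fmt"

        "github.com/containers/buildah/define"
    )

    func main() {
        policy, ok := define.PolicyMap["ifnewer"]
        if !ok {
            fmt.Println("unrecognized pull policy")
            return
        }
        fmt.Println(policy) // prints the policy's String() form
    }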
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 2bc71f948..b1f3ad67a 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -8,7 +8,7 @@ require (
github.com/containers/common v0.26.3
github.com/containers/image/v5 v5.8.0
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.23.9
+ github.com/containers/storage v1.24.0
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible // indirect
github.com/docker/go-units v0.4.0
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 1952ace1a..069328c38 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -22,6 +22,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -83,8 +85,8 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.23.6/go.mod h1:haFs0HRowKwyzvWEx9EgI3WsL8XCSnBDb5f8P5CAxJY=
github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
-github.com/containers/storage v1.23.9 h1:qbgnTp76pLSyW3vYwY5GH4vk5cHYVXFJ+CsUEBp9TMw=
-github.com/containers/storage v1.23.9/go.mod h1:3b2ktpB6pw53SEeIoFfO0sQfP9+IoJJKPq5iJk74gxE=
+github.com/containers/storage v1.24.0 h1:Fo2LkF7tkMLmo38sTZ/G8wHjcn8JfUFPfyTxM4WwMfk=
+github.com/containers/storage v1.24.0/go.mod h1:A4d3BzuZK9b3oLVEsiSRhZLPIx3z7utgiPyXLK/YMhY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index 329633b44..0aa1d236d 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -154,7 +154,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
_, img, err := util.FindImage(store, "", systemContext, options.Image)
if err != nil {
- return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
+ return nil, errors.Wrapf(err, "importing settings")
}
builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index d7e7b8890..d1fec145e 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -60,6 +60,8 @@ type PullOptions struct {
// OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an
// image if it is encrypted. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
+ // PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+ PullPolicy PullPolicy
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
@@ -169,6 +171,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
MaxPullRetries: options.MaxRetries,
PullRetryDelay: options.RetryDelay,
OciDecryptConfig: options.OciDecryptConfig,
+ PullPolicy: options.PullPolicy,
}
if !options.AllTags {
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 63f23d2af..53cc1a6f9 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.23.9
+1.24.0
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index a5393c10f..2d6485e80 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -60,6 +60,7 @@ type ApplyDiffOpts struct {
Mappings *idtools.IDMappings
MountLabel string
IgnoreChownErrors bool
+ ForceMask *os.FileMode
}
// InitFunc initializes the storage driver.
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index a7cfeadc7..c1895c364 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -93,6 +93,7 @@ type overlayOptions struct {
skipMountHome bool
mountOptions string
ignoreChownErrors bool
+ forceMask *os.FileMode
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -143,6 +144,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
if opts.mountProgram == "" {
+ if opts.forceMask != nil {
+ return nil, errors.New("'force_mask' is supported only with 'mount_program'")
+ }
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
@@ -328,6 +332,22 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
+ case "force_mask":
+ logrus.Debugf("overlay: force_mask=%s", val)
+ var mask int64
+ switch val {
+ case "shared":
+ mask = 0755
+ case "private":
+ mask = 0700
+ default:
+ mask, err = strconv.ParseInt(val, 8, 32)
+ if err != nil {
+ return nil, err
+ }
+ }
+ m := os.FileMode(mask)
+ o.forceMask = &m
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
@@ -573,17 +593,15 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
- perms := defaultPerms
if parent != "" {
st, err := system.Stat(d.dir(parent))
if err != nil {
return err
}
- perms = os.FileMode(st.Mode())
rootUID = int(st.UID())
rootGID = int(st.GID())
}
- if err := idtools.MkdirAs(dir, perms, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
return err
}
@@ -608,6 +626,18 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
}
+ perms := defaultPerms
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ }
+ if parent != "" {
+ st, err := system.Stat(filepath.Join(d.dir(parent), "diff"))
+ if err != nil {
+ return err
+ }
+ perms = os.FileMode(st.Mode())
+ }
+
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
@@ -852,15 +882,24 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
diffN := 1
perms := defaultPerms
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ }
+ permsKnown := false
st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil {
perms = os.FileMode(st.Mode())
+ permsKnown = true
}
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
- _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ if err == nil && !permsKnown {
+ perms = os.FileMode(st.Mode())
+ permsKnown = true
+ }
}
// For each lower, resolve its path, and append it and any additional diffN
@@ -871,10 +910,14 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
lower := ""
newpath := path.Join(d.home, l)
- if _, err := os.Stat(newpath); err != nil {
+ if st, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
lower = path.Join(p, d.name, l)
- if _, err2 := os.Stat(lower); err2 == nil {
+ if st2, err2 := os.Stat(lower); err2 == nil {
+ if !permsKnown {
+ perms = os.FileMode(st2.Mode())
+ permsKnown = true
+ }
break
}
lower = ""
@@ -892,6 +935,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
}
} else {
+ if !permsKnown {
+ perms = os.FileMode(st.Mode())
+ permsKnown = true
+ }
lower = newpath
}
absLowers = append(absLowers, lower)
@@ -1122,6 +1169,9 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
if d.options.ignoreChownErrors {
options.IgnoreChownErrors = d.options.ignoreChownErrors
}
+ if d.options.forceMask != nil {
+ options.ForceMask = d.options.forceMask
+ }
return d.naiveDiff.ApplyDiff(id, parent, options)
}
@@ -1138,6 +1188,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
+ ForceMask: d.options.forceMask,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
@@ -1251,8 +1302,12 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
i := 0
perms := defaultPerms
st, err := os.Stat(nameWithSuffix(diffDir, i))
- if err == nil {
- perms = os.FileMode(st.Mode())
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ } else {
+ if err == nil {
+ perms = os.FileMode(st.Mode())
+ }
}
for err == nil {
i++
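A minimal re-statement (not the vendored function) of the force_mask spellings that the parseOptions switch above accepts and what they resolve to:

    // Sketch: "shared" and "private" are shorthands; anything else is parsed as octal.
    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func resolveForceMask(val string) (os.FileMode, error) {
        switch val {
        case "shared":
            return 0755, nil
        case "private":
            return 0700, nil
        default:
            m, err := strconv.ParseInt(val, 8, 32)
            return os.FileMode(m), err
        }
    }

    func main() {
        for _, v := range []string{"shared", "private", "0644"} {
            m, _ := resolveForceMask(v)
            fmt.Printf("%s -> %#o\n", v, m)
        }
    }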
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index c1ab93e1d..1fd84e3b4 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -3,6 +3,7 @@
package windows
import (
+ "archive/tar"
"bufio"
"bytes"
"encoding/json"
@@ -21,7 +22,6 @@ import (
"unsafe"
"github.com/Microsoft/go-winio"
- "github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
"github.com/containers/storage/drivers"
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 9d5a2b425..34c1ea7ad 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -4,7 +4,7 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v0.3.1
- github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
+ github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.1.0
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 681f77cbc..bec6aa59a 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -3,6 +3,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 345da2903..2f917344a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -65,13 +65,16 @@ type (
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
+ // ForceMask, if set, indicates the permission mask used for created files.
+ ForceMask *os.FileMode
}
)
const (
- tarExt = "tar"
- solaris = "solaris"
- windows = "windows"
+ tarExt = "tar"
+ solaris = "solaris"
+ windows = "windows"
+ containersOverrideXattr = "user.containers.override_stat"
)
// Archiver allows the reuse of most utility functions of this package with a
@@ -603,18 +606,23 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, buffer []byte) error {
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
+ mask := hdrInfo.Mode()
+ if forceMask != nil {
+ mask = *forceMask
+ }
+
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
- if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ if err := os.Mkdir(path, mask); err != nil {
return err
}
}
@@ -623,7 +631,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
- file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask)
if err != nil {
return err
}
@@ -680,6 +688,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
+ if forceMask != nil && hdr.Typeflag != tar.TypeSymlink {
+ value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
+ if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+
// Lchown is not supported on Windows.
if Lchown && runtime.GOOS != windows {
if chownOpts == nil {
@@ -697,7 +712,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
- if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil {
return err
}
@@ -946,6 +961,16 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
+ if options.ForceMask != nil {
+ uid, gid, mode, err := getFileOwner(dest)
+ if err == nil {
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+ }
+
// Iterate through the files in the archive.
loop:
for {
@@ -1041,7 +1066,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
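For reference, a small sketch (not part of this diff) of the uid:gid:mode value that createTarFile above records in the user.containers.override_stat xattr when a force mask is in effect:

    // Sketch: the override_stat value format, "%d:%d:0%o" of the original header bits.
    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        uid, gid := 1000, 1000
        mode := os.FileMode(0640)
        fmt.Printf("%d:%d:0%o\n", uid, gid, mode&07777) // 1000:1000:0640
    }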
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 05aae4c13..6a5a867c7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -10,6 +10,7 @@ import (
"fmt"
"github.com/containers/storage/pkg/idtools"
fflib "github.com/pquerna/ffjson/fflib/v1"
+ "os"
)
// MarshalJSON marshal bytes to json - template
@@ -501,6 +502,12 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
} else {
buf.WriteString(`,"CopyPass":false`)
}
+ if j.ForceMask != nil {
+ buf.WriteString(`,"ForceMask":`)
+ fflib.FormatBits2(buf, uint64(*j.ForceMask), 10, false)
+ } else {
+ buf.WriteString(`,"ForceMask":null`)
+ }
buf.WriteByte('}')
return nil
}
@@ -538,6 +545,8 @@ const (
ffjtTarOptionsInUserNS
ffjtTarOptionsCopyPass
+
+ ffjtTarOptionsForceMask
)
var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles")
@@ -570,6 +579,8 @@ var ffjKeyTarOptionsInUserNS = []byte("InUserNS")
var ffjKeyTarOptionsCopyPass = []byte("CopyPass")
+var ffjKeyTarOptionsForceMask = []byte("ForceMask")
+
// UnmarshalJSON unmarshals json - template of ffjson
func (j *TarOptions) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@@ -657,6 +668,14 @@ mainparse:
goto mainparse
}
+ case 'F':
+
+ if bytes.Equal(ffjKeyTarOptionsForceMask, kn) {
+ currentKey = ffjtTarOptionsForceMask
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
case 'G':
if bytes.Equal(ffjKeyTarOptionsGIDMaps, kn) {
@@ -732,6 +751,12 @@ mainparse:
}
+ if fflib.EqualFoldRight(ffjKeyTarOptionsForceMask, kn) {
+ currentKey = ffjtTarOptionsForceMask
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) {
currentKey = ffjtTarOptionsCopyPass
state = fflib.FFParse_want_colon
@@ -884,6 +909,9 @@ mainparse:
case ffjtTarOptionsCopyPass:
goto handle_CopyPass
+ case ffjtTarOptionsForceMask:
+ goto handle_ForceMask
+
case ffjtTarOptionsnosuchkey:
err = fs.SkipField(tok)
if err != nil {
@@ -1597,6 +1625,39 @@ handle_CopyPass:
state = fflib.FFParse_after_value
goto mainparse
+handle_ForceMask:
+
+ /* handler: j.ForceMask type=os.FileMode kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for FileMode", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.ForceMask = nil
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := os.FileMode(tval)
+ j.ForceMask = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 3a47eceae..3faa23889 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -142,3 +142,15 @@ func isWhiteOut(stat os.FileInfo) bool {
s := stat.Sys().(*syscall.Stat_t)
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
}
+
+func getFileOwner(path string) (uint32, uint32, uint32, error) {
+ f, err := os.Stat(path)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ s, ok := f.Sys().(*syscall.Stat_t)
+ if ok {
+ return s.Uid, s.Gid, s.Mode & 07777, nil
+ }
+ return 0, 0, uint32(f.Mode()), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
index 585faa824..08e3bc889 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -5,3 +5,7 @@ package archive
func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
return nil
}
+
+func getFileOwner(path string) (uint32, uint32, uint32, error) {
+ return 0, 0, 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index bdc1a3d79..ecb704b64 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -106,15 +106,19 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+ permissionsMask := hdrInfo.Mode()
+ if forceMask != nil {
+ permissionsMask = *forceMask
+ }
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index 0bcbb925d..a0872444f 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -69,7 +69,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index a12dd4202..14ffad5c0 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -106,7 +106,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
- if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
}
@@ -197,7 +197,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
- if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index 33ba6a128..aacfee76f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -5,7 +5,9 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net"
"os"
+ "os/user"
"path/filepath"
"sync"
@@ -15,6 +17,13 @@ import (
"github.com/pkg/errors"
)
+func init() {
+ // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+ // environment, not in the chroot from untrusted files.
+ _, _ = user.Lookup("storage")
+ _, _ = net.LookupHost("localhost")
+}
+
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
archiver := archive.NewArchiver(idMappings)
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index c2fa2109e..7c9ac6ad6 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -2,6 +2,7 @@ package config
import (
"fmt"
+ "os"
)
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
@@ -94,6 +95,9 @@ type OverlayOptionsConfig struct {
Size string `toml:"size"`
// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories
+ ForceMask string `toml:"force_mask"`
}
type VfsOptionsConfig struct {
@@ -129,6 +133,10 @@ type OptionsConfig struct {
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories.
+ ForceMask os.FileMode `toml:"force_mask"`
+
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user"`
@@ -279,6 +287,11 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.SkipMountHome != "" {
doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome))
}
+ if options.Overlay.ForceMask != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask))
+ } else if options.ForceMask != 0 {
+ doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
+ }
case "vfs":
if options.Vfs.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 64e02f327..0577e84ca 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -82,6 +82,39 @@ mountopt = "nodev"
# Size is used to set a maximum size of the container image.
# size = ""
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted, as are octal permission
+# masks.
+#
+# "": No value specified.
+# All files/directories get created with the permissions identified within
+# the image.
+# "private": equivalent to 0700.
+# All files/directories get created with 0700 permissions. The owner has rwx
+# access to the files; no other users on the system can access them. This
+# setting can be useful for network-based home directories.
+# "shared": equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users, for instance a storage owned by root but shared with
+# rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image becomes readable by
+# any user.
+#
+# OCTAL: Users can experiment with other octal permission masks.
+#
+# Note: The force_mask flag is an experimental feature and could change in the
+# future. When "force_mask" is set, the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs such as "/usr/bin/fuse-overlayfs" present the
+# extended-attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
[storage.options.thinpool]
# Storage Options for thinpool
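Usage note (editorial, not part of this changeset): the experimental force_mask setting documented above is only honored together with a mount_program, for example force_mask = "shared" alongside mount_program = "/usr/bin/fuse-overlayfs"; the overlay driver change earlier in this diff rejects force_mask when no mount program is configured.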
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 6b51b405d..b9115f195 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -3551,6 +3551,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.Options.IgnoreChownErrors != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors))
}
+ if config.Storage.Options.ForceMask != 0 {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask))
+ }
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 673687b2e..320d9851f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -3,9 +3,8 @@ github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
-# github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873
+# github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/go-winio
-github.com/Microsoft/go-winio/archive/tar
github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/vhd
@@ -68,7 +67,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.17.1-0.20201113135631-d0c958d65eb2
+# github.com/containers/buildah v1.18.0
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -169,7 +168,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.23.9
+# github.com/containers/storage v1.24.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs