20 files changed, 259 insertions, 61 deletions
diff --git a/cmd/podman/cp.go b/cmd/podman/cp.go index 2e2ca272a..7679ebcf1 100644 --- a/cmd/podman/cp.go +++ b/cmd/podman/cp.go @@ -51,7 +51,7 @@ func init() { cpCommand.Command = _cpCommand flags := cpCommand.Flags() flags.BoolVar(&cpCommand.Extract, "extract", false, "Extract the tar file into the destination directory.") - flags.BoolVar(&cpCommand.Pause, "pause", true, "Pause the container while copying") + flags.BoolVar(&cpCommand.Pause, "pause", false, "Pause the container while copying") cpCommand.SetHelpTemplate(HelpTemplate()) cpCommand.SetUsageTemplate(UsageTemplate()) rootCmd.AddCommand(cpCommand.Command) diff --git a/docs/podman-cp.1.md b/docs/podman-cp.1.md index 76fe57a9e..ee218d66a 100644 --- a/docs/podman-cp.1.md +++ b/docs/podman-cp.1.md @@ -63,7 +63,7 @@ Extract the tar file into the destination directory. If the destination director **--pause** -Pause the container while copying into it to avoid potential security issues around symlinks. Defaults to *true*. +Pause the container while copying into it to avoid potential security issues around symlinks. Defaults to *false*. ## ALTERNATIVES diff --git a/vendor.conf b/vendor.conf index cfd410889..a33a52786 100644 --- a/vendor.conf +++ b/vendor.conf @@ -19,7 +19,7 @@ github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145 github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 -github.com/containers/storage v1.12.7 +github.com/containers/storage 9b10041d7b2ef767ce9c42b5862b6c51eeb82214 github.com/containers/psgo v1.3.0 github.com/coreos/go-systemd v17 github.com/coreos/pkg v4 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index bbac78b60..e69552361 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -572,6 +572,10 @@ func (r *containerStore) Lock() { r.lockfile.Lock() } +func (r *containerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + func (r *containerStore) RLock() { r.lockfile.RLock() } diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index e821bc0c5..353d1707a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -255,6 +255,9 @@ func (a *Driver) AdditionalImageStores() []string { // CreateFromTemplate creates a layer with the same contents and parent as another layer. func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if opts == nil { + opts = &graphdriver.CreateOpts{} + } return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) } diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 4d4011ee0..f2f1ec386 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -55,6 +55,9 @@ func chownByMapsMain() { if err != nil { return fmt.Errorf("error walking to %q: %v", path, err) } + if path == "." 
{ + return nil + } return platformLChown(path, info, toHost, toContainer) } if err := filepath.Walk(".", chown); err != nil { diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 38b5a3ef3..6f487504a 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -82,6 +82,9 @@ type Image struct { // is set before using it. Created time.Time `json:"created,omitempty"` + // ReadOnly is true if this image resides in a read-only layer store. + ReadOnly bool `json:"-"` + Flags map[string]interface{} `json:"flags,omitempty"` } @@ -159,6 +162,7 @@ func copyImage(i *Image) *Image { BigDataSizes: copyStringInt64Map(i.BigDataSizes), BigDataDigests: copyStringDigestMap(i.BigDataDigests), Created: i.Created, + ReadOnly: i.ReadOnly, Flags: copyStringInterfaceMap(i.Flags), } } @@ -269,6 +273,7 @@ func (r *imageStore) Load() error { list := digests[digest] digests[digest] = append(list, image) } + image.ReadOnly = !r.IsReadWrite() } } if shouldSave && (!r.IsReadWrite() || !r.Locked()) { @@ -739,6 +744,10 @@ func (r *imageStore) Lock() { r.lockfile.Lock() } +func (r *imageStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + func (r *imageStore) RLock() { r.lockfile.RLock() } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index a35dd476b..fb79238cd 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -103,6 +103,9 @@ type Layer struct { // for use inside of a user namespace where UID mapping is being used. UIDMap []idtools.IDMap `json:"uidmap,omitempty"` GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + // ReadOnly is true if this layer resides in a read-only layer store. + ReadOnly bool `json:"-"` } type layerMountPoint struct { @@ -259,6 +262,7 @@ func copyLayer(l *Layer) *Layer { UncompressedDigest: l.UncompressedDigest, UncompressedSize: l.UncompressedSize, CompressionType: l.CompressionType, + ReadOnly: l.ReadOnly, Flags: copyStringInterfaceMap(l.Flags), UIDMap: copyIDMap(l.UIDMap), GIDMap: copyIDMap(l.GIDMap), @@ -318,6 +322,7 @@ func (r *layerStore) Load() error { if layer.MountLabel != "" { label.ReserveLabel(layer.MountLabel) } + layer.ReadOnly = !r.IsReadWrite() } err = nil } @@ -1304,6 +1309,10 @@ func (r *layerStore) Lock() { r.lockfile.Lock() } +func (r *layerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + func (r *layerStore) RLock() { r.lockfile.RLock() } diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go index 09b5d0f33..125b5d8c9 100644 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT. -// source: ./layers.go +// source: layers.go package storage diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index ed8753337..c4f1b5549 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -15,6 +15,10 @@ type Locker interface { // Acquire a writer lock. Lock() + // Acquire a writer lock recursively, allowing for recursive acquisitions + // within the same process space. + RecursiveLock() + // Unlock the lock. 
Unlock() diff --git a/vendor/github.com/containers/storage/lockfile_linux.go b/vendor/github.com/containers/storage/lockfile_linux.go deleted file mode 100644 index 903387c66..000000000 --- a/vendor/github.com/containers/storage/lockfile_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build linux solaris - -package storage - -import ( - "time" - - "golang.org/x/sys/unix" -) - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *lockfile) TouchedSince(when time.Time) bool { - st := unix.Stat_t{} - err := unix.Fstat(int(l.fd), &st) - if err != nil { - return true - } - touched := time.Unix(st.Mtim.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/lockfile_otherunix.go b/vendor/github.com/containers/storage/lockfile_otherunix.go deleted file mode 100644 index 041d54c05..000000000 --- a/vendor/github.com/containers/storage/lockfile_otherunix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build darwin freebsd - -package storage - -import ( - "time" - - "golang.org/x/sys/unix" -) - -func (l *lockfile) TouchedSince(when time.Time) bool { - st := unix.Stat_t{} - err := unix.Fstat(int(l.fd), &st) - if err != nil { - return true - } - touched := time.Unix(st.Mtimespec.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go index 8e0f22cb5..00215e928 100644 --- a/vendor/github.com/containers/storage/lockfile_unix.go +++ b/vendor/github.com/containers/storage/lockfile_unix.go @@ -9,6 +9,7 @@ import ( "time" "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -25,6 +26,7 @@ type lockfile struct { locktype int16 locked bool ro bool + recursive bool } // openLock opens the file at path and returns the corresponding file @@ -75,7 +77,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) { // lock locks the lockfile via FCTNL(2) based on the specified type and // command. -func (l *lockfile) lock(l_type int16) { +func (l *lockfile) lock(l_type int16, recursive bool) { lk := unix.Flock_t{ Type: l_type, Whence: int16(os.SEEK_SET), @@ -86,7 +88,13 @@ func (l *lockfile) lock(l_type int16) { case unix.F_RDLCK: l.rwMutex.RLock() case unix.F_WRLCK: - l.rwMutex.Lock() + if recursive { + // NOTE: that's okay as recursive is only set in RecursiveLock(), so + // there's no need to protect against hypothetical RDLCK cases. + l.rwMutex.RLock() + } else { + l.rwMutex.Lock() + } default: panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type)) } @@ -110,6 +118,7 @@ func (l *lockfile) lock(l_type int16) { } l.locktype = l_type l.locked = true + l.recursive = recursive l.counter++ } @@ -119,13 +128,24 @@ func (l *lockfile) Lock() { if l.ro { l.RLock() } else { - l.lock(unix.F_WRLCK) + l.lock(unix.F_WRLCK, false) + } +} + +// RecursiveLock locks the lockfile as a writer but allows for recursive +// acquisitions within the same process space. Note that RLock() will be called +// if it's a lockTypReader lock. +func (l *lockfile) RecursiveLock() { + if l.ro { + l.RLock() + } else { + l.lock(unix.F_WRLCK, true) } } // LockRead locks the lockfile as a reader. func (l *lockfile) RLock() { - l.lock(unix.F_RDLCK) + l.lock(unix.F_RDLCK, false) } // Unlock unlocks the lockfile. @@ -161,7 +181,7 @@ func (l *lockfile) Unlock() { // Close the file descriptor on the last unlock. 
unix.Close(int(l.fd)) } - if l.locktype == unix.F_RDLCK { + if l.locktype == unix.F_RDLCK || l.recursive { l.rwMutex.RUnlock() } else { l.rwMutex.Unlock() @@ -232,3 +252,14 @@ func (l *lockfile) Modified() (bool, error) { func (l *lockfile) IsReadWrite() bool { return !l.ro } + +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *lockfile) TouchedSince(when time.Time) bool { + st, err := system.Fstat(int(l.fd)) + if err != nil { + return true + } + mtim := st.Mtim() + touched := time.Unix(mtim.Unix()) + return when.Before(touched) +} diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go index c02069495..caf7c184a 100644 --- a/vendor/github.com/containers/storage/lockfile_windows.go +++ b/vendor/github.com/containers/storage/lockfile_windows.go @@ -36,6 +36,12 @@ func (l *lockfile) Lock() { l.locked = true } +func (l *lockfile) RecursiveLock() { + // We don't support Windows but a recursive writer-lock in one process-space + // is really a writer lock, so just panic. + panic("not supported") +} + func (l *lockfile) RLock() { l.mu.Lock() l.locked = true diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index a36ff1cb1..33ba6a128 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -1,7 +1,7 @@ package chrootarchive import ( - "archive/tar" + stdtar "archive/tar" "fmt" "io" "io/ioutil" @@ -34,18 +34,34 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) + return untarHandler(tarArchive, dest, options, true, dest) +} + +// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory +// The root directory is the directory that will be chrooted to. +// `dest` must be a path within `root`, if it is not an error will be returned. +// +// `root` should set to a directory which is not controlled by any potentially +// malicious process. +// +// This should be used to prevent a potential attacker from manipulating `dest` +// such that it would provide access to files outside of `dest` through things +// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however +// sanitizing symlinks in this manner is inherrently racey: +// ref: CVE-2018-15664 +func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + return untarHandler(tarArchive, dest, options, true, root) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. 
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) + return untarHandler(tarArchive, dest, options, false, dest) } // Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } @@ -77,7 +93,15 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions r = decompressedArchive } - return invokeUnpack(r, dest, options) + return invokeUnpack(r, dest, options, root) +} + +// Tar tars the requested path while chrooted to the specified root. +func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if options == nil { + options = &archive.TarOptions{} + } + return invokePack(srcPath, options, root) } // CopyFileWithTarAndChown returns a function which copies a single file from outside @@ -99,7 +123,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap var hashWorker sync.WaitGroup hashWorker.Add(1) go func() { - t := tar.NewReader(contentReader) + t := stdtar.NewReader(contentReader) _, err := t.Next() if err != nil { hashError = err diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index e04ed787c..ca9fb10d7 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -10,10 +10,13 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "runtime" + "strings" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" + "github.com/pkg/errors" ) // untar is the entry-point for storage-untar on re-exec. This is not used on @@ -23,18 +26,28 @@ func untar() { runtime.LockOSThread() flag.Parse() - var options *archive.TarOptions + var options archive.TarOptions //read the options from the pipe "ExtraFiles" if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { fatal(err) } - if err := chroot(flag.Arg(0)); err != nil { + dst := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = dst + } + + if err := chroot(root); err != nil { fatal(err) } - if err := archive.Unpack(os.Stdin, "/", options); err != nil { + if err := archive.Unpack(os.Stdin, dst, &options); err != nil { fatal(err) } // fully consume stdin in case it is zero padded @@ -45,7 +58,10 @@ func untar() { os.Exit(0) } -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + if root == "" { + return errors.New("must specify a root to chroot to") + } // We can't pass a potentially large exclude list directly via cmd line // because we easily overrun the kernel's max argument/environment size @@ -57,7 +73,21 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T return fmt.Errorf("Untar pipe failure: %v", err) } - cmd := reexec.Command("storage-untar", dest) + if root != "" { + relDest, err := filepath.Rel(root, dest) + if err != nil { + return err + } + if relDest == "." 
{ + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("storage-untar", dest, root) cmd.Stdin = decompressedArchive cmd.ExtraFiles = append(cmd.ExtraFiles, r) @@ -68,6 +98,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T if err := cmd.Start(); err != nil { return fmt.Errorf("Untar error on re-exec cmd: %v", err) } + //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { return fmt.Errorf("Untar json encode to pipe failed: %v", err) @@ -84,3 +115,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T } return nil } + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." { + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("storage-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 93fde4220..8a5c680b1 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -14,9 +14,16 @@ func chroot(path string) error { func invokeUnpack(decompressedArchive io.ReadCloser, dest string, - options *archive.TarOptions) error { + options *archive.TarOptions, root string) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. 
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) } + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index f9b5dece8..83278ee50 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -4,9 +4,13 @@ package chrootarchive import "golang.org/x/sys/unix" -func chroot(path string) error { +func realChroot(path string) error { if err := unix.Chroot(path); err != nil { return err } return unix.Chdir("/") } + +func chroot(path string) error { + return realChroot(path) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go index 21cd87992..ea08135e4 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -14,6 +14,7 @@ import ( func init() { reexec.Register("storage-applyLayer", applyLayer) reexec.Register("storage-untar", untar) + reexec.Register("storage-tar", tar) } func fatal(err error) { diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go index 91c7d121c..f9a1b4877 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -58,3 +58,15 @@ func Stat(path string) (*StatT, error) { } return fromStatT(s) } + +// Fstat takes an open file descriptor and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file descriptor is invalid +func Fstat(fd int) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Fstat(fd, s); err != nil { + return nil, err + } + return fromStatT(s) +} |
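Notes on the changes above (sketches only, not part of the diff):

The RecursiveLock() additions in containers.go, images.go, layers.go and lockfile_unix.go let code that already holds a store's writer lock take it again within the same process without deadlocking. The fcntl(2) lock is per-process anyway, so only the in-process sync.RWMutex has to be relaxed, which is why a recursive write acquisition takes rwMutex.RLock() instead of rwMutex.Lock(). Below is a stripped-down, self-contained sketch of that idea; the real lockfile also issues the fcntl lock and tracks more state, and the type and field names here are illustrative only.

	package main

	import (
		"fmt"
		"sync"
	)

	// recLock mimics the in-process half of the lockfile: a plain Lock() is
	// exclusive, while RecursiveLock() may be taken again by the same process
	// without deadlocking, because cross-process exclusion is already provided
	// by the fcntl(2) lock in the real implementation.
	type recLock struct {
		rwMutex   sync.RWMutex
		stateLock sync.Mutex
		recursive bool
		counter   int // mirrors l.counter in the real lockfile
	}

	func (l *recLock) lock(recursive bool) {
		if recursive {
			l.rwMutex.RLock()
		} else {
			l.rwMutex.Lock()
		}
		l.stateLock.Lock()
		l.recursive = recursive
		l.counter++
		l.stateLock.Unlock()
	}

	func (l *recLock) Lock()          { l.lock(false) }
	func (l *recLock) RecursiveLock() { l.lock(true) }

	func (l *recLock) Unlock() {
		l.stateLock.Lock()
		l.counter--
		recursive := l.recursive
		l.stateLock.Unlock()
		if recursive {
			l.rwMutex.RUnlock()
		} else {
			l.rwMutex.Unlock()
		}
	}

	func main() {
		var l recLock
		l.RecursiveLock()
		l.RecursiveLock() // re-entry would deadlock with Lock(); fine here
		l.Unlock()
		l.Unlock()
		fmt.Println("recursive acquisitions released")
	}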
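The chrootarchive changes (UntarWithRoot, Tar, and the dest/root split in the storage-untar and storage-tar re-exec helpers) are the CVE-2018-15664 hardening: instead of resolving container-controlled symlinks in dest up front, which is inherently racy, the helper first chroots to a root directory the container cannot influence and only then interprets dest relative to it. A hypothetical caller-side sketch follows, assuming mountPoint is a container mount point obtained elsewhere; copyIntoContainer, copyOutOfContainer and the paths in main are made up for illustration.

	package main

	import (
		"io"
		"os"
		"path/filepath"

		"github.com/containers/storage/pkg/archive"
		"github.com/containers/storage/pkg/chrootarchive"
		"github.com/containers/storage/pkg/reexec"
	)

	// copyIntoContainer unpacks a tar stream under destPath inside the container
	// whose filesystem is mounted at mountPoint. Because the helper chroots to
	// mountPoint before touching destPath, symlinks inside the container cannot
	// redirect the write outside of the container's root.
	func copyIntoContainer(tarStream io.Reader, mountPoint, destPath string) error {
		dest := filepath.Join(mountPoint, destPath)
		return chrootarchive.UntarWithRoot(tarStream, dest, &archive.TarOptions{}, mountPoint)
	}

	// copyOutOfContainer tars up srcPath inside the container, again while
	// chrooted to mountPoint, and writes the archive to out.
	func copyOutOfContainer(out io.Writer, mountPoint, srcPath string) error {
		rdr, err := chrootarchive.Tar(filepath.Join(mountPoint, srcPath), nil, mountPoint)
		if err != nil {
			return err
		}
		defer rdr.Close()
		_, err = io.Copy(out, rdr)
		return err
	}

	func main() {
		if reexec.Init() {
			// We were re-exec'd as the storage-tar/storage-untar helper.
			return
		}
		// Placeholder values; a real caller gets the mount point from the store.
		mountPoint := "/tmp/example-container-root"
		f, err := os.Open("payload.tar")
		if err != nil {
			return
		}
		defer f.Close()
		if err := copyIntoContainer(f, mountPoint, "/srv/data"); err != nil {
			os.Exit(1)
		}
		_ = copyOutOfContainer
	}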
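Image.ReadOnly and Layer.ReadOnly are filled in while loading a store that is not read-write (for example an additional image store), so callers can tell which records cannot be modified without poking at store internals. A minimal, hypothetical consumer is sketched below; DefaultStoreOptions and the surrounding boilerplate are illustrative and may differ between containers/storage versions.

	package main

	import (
		"fmt"

		"github.com/containers/storage"
	)

	func main() {
		// Illustrative setup; real callers usually derive their options from
		// the host's storage.conf.
		store, err := storage.GetStore(storage.DefaultStoreOptions)
		if err != nil {
			fmt.Println(err)
			return
		}
		defer store.Shutdown(false)

		images, err := store.Images()
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, img := range images {
			if img.ReadOnly {
				// The image came from an additional, read-only image store and
				// cannot be deleted or modified through this store.
				fmt.Printf("%s (read-only)\n", img.ID)
			}
		}
	}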