From 561e65969f89c6f60193cf1755752b571a1149f5 Mon Sep 17 00:00:00 2001 From: baude Date: Thu, 3 Jan 2019 08:55:35 -0600 Subject: vendor in new containers/storage vendor in latest containers/storage which contains a fix for when a filesystem that overlayfs is on is ENOSPC. adding pgzip/compress as a new dep for c/s Signed-off-by: baude --- vendor/github.com/klauspost/pgzip/GO_LICENSE | 27 ++ vendor/github.com/klauspost/pgzip/LICENSE | 22 + vendor/github.com/klauspost/pgzip/README.md | 136 +++++++ vendor/github.com/klauspost/pgzip/gunzip.go | 573 +++++++++++++++++++++++++++ vendor/github.com/klauspost/pgzip/gzip.go | 501 +++++++++++++++++++++++ 5 files changed, 1259 insertions(+) create mode 100644 vendor/github.com/klauspost/pgzip/GO_LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/README.md create mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go create mode 100644 vendor/github.com/klauspost/pgzip/gzip.go (limited to 'vendor/github.com/klauspost/pgzip') diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/GO_LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE
new file mode 100644
index 000000000..2bdc0d751
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md
new file mode 100644
index 000000000..81000996c
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/README.md
@@ -0,0 +1,136 @@
+pgzip
+=====
+
+Go parallel gzip compression/decompression. This is a fully gzip-compatible drop-in replacement for "compress/gzip".
+
+This will split compression into blocks that are compressed in parallel.
+This can be useful for compressing large amounts of data. The output is a standard gzip file.
+
+The gzip decompression is modified so it decompresses ahead of the current reader.
+This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it.
+CRC calculation also takes place in a separate goroutine.
+
+You should only use this if you are (de)compressing large amounts of data,
+say **more than 1MB** at a time, otherwise you will not see any benefit,
+and it will likely be faster to use the standard library gzip
+or [this package](https://github.com/klauspost/compress).
+
+It is important to note that this library creates and reads *standard gzip files*.
+You do not have to match the compressor/decompressor to get the described speedups,
+and the gzip files are fully compatible with other gzip readers/writers.
+
+A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf),
+which has the same feature, as well as seeking in the resulting file.
+The only drawback is a slightly bigger overhead compared to this and pure gzip.
+See a comparison below.
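+
+Here is a minimal sketch of what the drop-in usage looks like on the
+decompression side (the file name is an arbitrary example, and error handling
+is kept short):
+
+```
+package main
+
+import (
+	"io"
+	"os"
+
+	gzip "github.com/klauspost/pgzip"
+)
+
+func main() {
+	f, err := os.Open("data.gz") // placeholder file name
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	// Same API as compress/gzip; readahead starts behind the scenes.
+	zr, err := gzip.NewReader(f)
+	if err != nil {
+		panic(err)
+	}
+	defer zr.Close()
+
+	// io.Copy uses the reader's WriteTo for fewer copies.
+	if _, err := io.Copy(os.Stdout, zr); err != nil {
+		panic(err)
+	}
+}
+```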
+
+[![GoDoc][1]][2] [![Build Status][3]][4]
+
+[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg
+[2]: https://godoc.org/github.com/klauspost/pgzip
+[3]: https://travis-ci.org/klauspost/pgzip.svg
+[4]: https://travis-ci.org/klauspost/pgzip
+
+Installation
+====
+```go get github.com/klauspost/pgzip/...```
+
+You might need to get/update the dependencies:
+
+```
+go get -u github.com/klauspost/compress
+go get -u github.com/klauspost/crc32
+```
+
+Usage
+====
+[Godoc Documentation](https://godoc.org/github.com/klauspost/pgzip)
+
+To use as a replacement for gzip, exchange
+
+```import "compress/gzip"```
+with
+```import gzip "github.com/klauspost/pgzip"```.
+
+# Changes
+
+* Oct 6, 2016: Fixed an issue if the destination writer returned an error.
+* Oct 6, 2016: Better buffer reuse, should now generate less garbage.
+* Oct 6, 2016: Output does not change based on write sizes.
+* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure.
+* Oct 9, 2015: Reduced allocations by ~35% by using sync.Pool. ~15% overall speedup.
+
+Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes.
+
+## Compression
+The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. The default is SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be processed at once before the writer blocks.
+
+
+Example:
+```
+var b bytes.Buffer
+w := gzip.NewWriter(&b)
+w.SetConcurrency(100000, 10)
+w.Write([]byte("hello, world\n"))
+w.Close()
+```
+
+To get any performance gains, you should at least be compressing more than 1 megabyte of data at a time.
+
+You should use a block size of at least 100k, and at least as many blocks as the number of cores you would like to utilize; about twice the number of cores is best.
+
+Another side effect of this is that it is likely to speed up your other code, since writes to the compressor only block if the compressor is already compressing the number of blocks you have specified. This also means you don't have to worry about buffering input to the compressor.
+
+## Decompression
+
+Decompression works similarly to compression. That means that you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom block sizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that are decoded ahead.
+
+See [Example on playground](http://play.golang.org/p/uHv1B5NbDh)
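+
+For completeness, a minimal sketch of custom readahead with `NewReaderN`
+(the sizes and the input source are arbitrary examples; imports as in the
+sketch above):
+
+```
+// 1 MB blocks, up to 4 blocks decoded ahead.
+zr, err := gzip.NewReaderN(os.Stdin, 1<<20, 4)
+if err != nil {
+	panic(err)
+}
+defer zr.Close()
+io.Copy(os.Stdout, zr)
+```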
+
+Performance
+====
+## Compression
+
+See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+
+Compression overhead is usually about 0.2% with default settings and a block size of 250k.
+
+Example with GOMAXPROCS set to 8 (quad core with 8 hyperthreads)
+
+Content is [Matt Mahoney's 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6.
+
+Compressor | MB/sec | speedup | size | size overhead (lower=better)
+------------|----------|---------|------|---------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0%
+[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11%
+[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s | 7.04x | 4784121440 | -0.052%
+[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889%
+[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096%
+
+pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode that will allow compression at ~150MB per core per second, independent of the content.
+
+See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings.
+
+## Decompression
+
+The decompression speedup is there because it allows you to do other work while the decompression is taking place.
+
+In the example above, the numbers are as follows on a 4 CPU machine:
+
+Decompressor | Time | Speedup
+-------------|------|--------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0%
+[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104%
+
+But wait, since gzip decompression is inherently singlethreaded (aside from CRC calculation), how can it be more than 100% faster? Because pgzip, due to its design, also acts as a buffer. When using unbuffered gzip, you are also waiting for I/O while you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete.
+
+This is pretty much an optimal situation for pgzip, but it reflects the most common use cases for CPU-intensive gzip usage.
+
+I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it can only decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf-compatible program, you can expect it to scale beyond 100%.
+
+# License
+This contains large portions of code from the Go repository - see GO_LICENSE for more information. The changes are released under the MIT License. See LICENSE for more information.
diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go
new file mode 100644
index 000000000..93efec714
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/gunzip.go
@@ -0,0 +1,573 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pgzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+//
+// This is a drop-in replacement for "compress/gzip".
+// This will split compression into blocks that are compressed in parallel.
+// This can be useful for compressing large amounts of data.
+// The gzip decompression is also modified to read ahead of the consumer,
+// so you can use it as a complete replacement for "compress/gzip".
+//
+// See more at https://github.com/klauspost/pgzip
+package pgzip

+import (
+	"bufio"
+	"errors"
+	"hash"
+	"hash/crc32"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+func makeReader(r io.Reader) flate.Reader {
+	if rr, ok := r.(flate.Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = errors.New("gzip: invalid checksum")
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = errors.New("gzip: invalid header")
+)
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+	Header
+	r            flate.Reader
+	decompressor io.ReadCloser
+	digest       hash.Hash32
+	size         uint32
+	flg          byte
+	buf          [512]byte
+	err          error
+	closeErr     chan error
+	multistream  bool
+
+	readAhead   chan read
+	roff        int // read offset
+	current     []byte
+	closeReader chan struct{}
+	lastBlock   bool
+	blockSize   int
+	blocks      int
+
+	activeRA bool       // Indicates whether readahead is active
+	mu       sync.Mutex // Lock for above
+
+	blockPool chan []byte
+}
+
+type read struct {
+	b   []byte
+	err error
+}
+
+// NewReader creates a new Reader reading the given reader.
+// The implementation buffers input and may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the Reader when done.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+	z.blocks = defaultBlocks
+	z.blockSize = defaultBlockSize
+	z.r = makeReader(r)
+	z.digest = crc32.NewIEEE()
+	z.multistream = true
+	z.blockPool = make(chan []byte, z.blocks)
+	for i := 0; i < z.blocks; i++ {
+		z.blockPool <- make([]byte, z.blockSize)
+	}
+	if err := z.readHeader(true); err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+// NewReaderN creates a new Reader reading the given reader.
+// The implementation buffers input and may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// With this you can control the approximate size of your blocks,
+// as well as how many blocks you want to have prefetched.
+//
+// The default values are blockSize = 250000, blocks = 16,
+// meaning up to 16 blocks of maximum 250000 bytes will be
+// prefetched.
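+//
+// A short sketch (r stands for any io.Reader; the sizes are examples):
+//
+//	zr, err := NewReaderN(r, 1<<20, 4) // 1 MB blocks, 4 prefetched
+//	if err != nil {
+//		// handle err
+//	}
+//	defer zr.Close()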
+func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { + z := new(Reader) + z.blocks = blocks + z.blockSize = blockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + + // Account for too small values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + z.killReadAhead() + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.size = 0 + z.err = nil + z.multistream = true + + // Account for uninitialized values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + + if z.blockPool == nil { + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + } + + return z.readHeader(true) +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func get4(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func (z *Reader) readString() (string, error) { + var err error + needconv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needconv = true + } + if z.buf[i] == 0 { + // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). 
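+			// Converting byte-by-byte maps Latin-1 values above 0x7f to
+			// the matching Unicode code points.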
+			if needconv {
+				s := make([]rune, 0, i)
+				for _, v := range z.buf[0:i] {
+					s = append(s, rune(v))
+				}
+				return string(s), nil
+			}
+			return string(z.buf[0:i]), nil
+		}
+	}
+}
+
+func (z *Reader) read2() (uint32, error) {
+	_, err := io.ReadFull(z.r, z.buf[0:2])
+	if err != nil {
+		return 0, err
+	}
+	return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
+}
+
+func (z *Reader) readHeader(save bool) error {
+	z.killReadAhead()
+
+	_, err := io.ReadFull(z.r, z.buf[0:10])
+	if err != nil {
+		return err
+	}
+	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+		return ErrHeader
+	}
+	z.flg = z.buf[3]
+	if save {
+		z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0)
+		// z.buf[8] is xfl, ignored
+		z.OS = z.buf[9]
+	}
+	z.digest.Reset()
+	z.digest.Write(z.buf[0:10])
+
+	if z.flg&flagExtra != 0 {
+		n, err := z.read2()
+		if err != nil {
+			return err
+		}
+		data := make([]byte, n)
+		if _, err = io.ReadFull(z.r, data); err != nil {
+			return err
+		}
+		if save {
+			z.Extra = data
+		}
+	}
+
+	var s string
+	if z.flg&flagName != 0 {
+		if s, err = z.readString(); err != nil {
+			return err
+		}
+		if save {
+			z.Name = s
+		}
+	}
+
+	if z.flg&flagComment != 0 {
+		if s, err = z.readString(); err != nil {
+			return err
+		}
+		if save {
+			z.Comment = s
+		}
+	}
+
+	if z.flg&flagHdrCrc != 0 {
+		n, err := z.read2()
+		if err != nil {
+			return err
+		}
+		sum := z.digest.Sum32() & 0xFFFF
+		if n != sum {
+			return ErrHeader
+		}
+	}
+
+	z.digest.Reset()
+	z.decompressor = flate.NewReader(z.r)
+	z.doReadAhead()
+	return nil
+}
+
+func (z *Reader) killReadAhead() error {
+	z.mu.Lock()
+	defer z.mu.Unlock()
+	if z.activeRA {
+		if z.closeReader != nil {
+			close(z.closeReader)
+		}
+
+		// Wait for decompressor to be closed and return error, if any.
+		e, ok := <-z.closeErr
+		z.activeRA = false
+		if !ok {
+			// Channel is closed, so if there was any error it has already been returned.
+			return nil
+		}
+		return e
+	}
+	return nil
+}
+
+// Starts readahead.
+// Will return on error (including io.EOF)
+// or when z.closeReader is closed.
+func (z *Reader) doReadAhead() {
+	z.mu.Lock()
+	defer z.mu.Unlock()
+	z.activeRA = true
+
+	if z.blocks <= 0 {
+		z.blocks = defaultBlocks
+	}
+	if z.blockSize <= 512 {
+		z.blockSize = defaultBlockSize
+	}
+	ra := make(chan read, z.blocks)
+	z.readAhead = ra
+	closeReader := make(chan struct{}, 0)
+	z.closeReader = closeReader
+	z.lastBlock = false
+	closeErr := make(chan error, 1)
+	z.closeErr = closeErr
+	z.size = 0
+	z.roff = 0
+	z.current = nil
+	decomp := z.decompressor
+
+	go func() {
+		defer func() {
+			closeErr <- decomp.Close()
+			close(closeErr)
+			close(ra)
+		}()
+
+		// We hold a local reference to digest, since
+		// it may be changed by Reset.
+		digest := z.digest
+		var wg sync.WaitGroup
+		for {
+			var buf []byte
+			select {
+			case buf = <-z.blockPool:
+			case <-closeReader:
+				return
+			}
+			buf = buf[0:z.blockSize]
+			// Try to fill the buffer
+			n, err := io.ReadFull(decomp, buf)
+			if err == io.ErrUnexpectedEOF {
+				if n > 0 {
+					err = nil
+				} else {
+					// If we got zero bytes, we need to establish if
+					// we reached end of stream or truncated stream.
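+					// A zero-byte read distinguishes a clean end of stream
+					// (io.EOF) from a truncated stream.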
+					_, err = decomp.Read([]byte{})
+					if err == io.EOF {
+						err = nil
+					}
+				}
+			}
+			if n < len(buf) {
+				buf = buf[0:n]
+			}
+			wg.Wait()
+			wg.Add(1)
+			go func() {
+				digest.Write(buf)
+				wg.Done()
+			}()
+			z.size += uint32(n)
+
+			// If we return any error, our digest must be ready
+			if err != nil {
+				wg.Wait()
+			}
+			select {
+			case z.readAhead <- read{b: buf, err: err}:
+			case <-closeReader:
+				// Sent on close, we don't care about the next results
+				return
+			}
+			if err != nil {
+				return
+			}
+		}
+	}()
+}
+
+func (z *Reader) Read(p []byte) (n int, err error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	for {
+		if len(z.current) == 0 && !z.lastBlock {
+			read := <-z.readAhead
+
+			if read.err != nil {
+				// If not nil, the reader will have exited
+				z.closeReader = nil
+
+				if read.err != io.EOF {
+					z.err = read.err
+					return
+				}
+				if read.err == io.EOF {
+					z.lastBlock = true
+					err = nil
+				}
+			}
+			z.current = read.b
+			z.roff = 0
+		}
+		avail := z.current[z.roff:]
+		if len(p) >= len(avail) {
+			// If len(p) >= len(avail), return all remaining content of the current block
+			n = copy(p, avail)
+			z.blockPool <- z.current
+			z.current = nil
+			if z.lastBlock {
+				err = io.EOF
+				break
+			}
+		} else {
+			// We copy as much as there is space for
+			n = copy(p, avail)
+			z.roff += n
+		}
+		return
+	}
+
+	// Finished file; check checksum + size.
+	if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+		z.err = err
+		return 0, err
+	}
+	crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+	sum := z.digest.Sum32()
+	if sum != crc32 || isize != z.size {
+		z.err = ErrChecksum
+		return 0, z.err
+	}
+
+	// File is ok; should we attempt reading one more?
+	if !z.multistream {
+		return 0, io.EOF
+	}
+
+	// Is there another?
+	if err = z.readHeader(false); err != nil {
+		z.err = err
+		return
+	}
+
+	// Yes. Reset and read from it.
+	return z.Read(p)
+}
+
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	total := int64(0)
+	for {
+		if z.err != nil {
+			return total, z.err
+		}
+		// We write both to output and digest.
+		for {
+			// Read from input
+			read := <-z.readAhead
+			if read.err != nil {
+				// If not nil, the reader will have exited
+				z.closeReader = nil
+
+				if read.err != io.EOF {
+					z.err = read.err
+					return total, z.err
+				}
+				if read.err == io.EOF {
+					z.lastBlock = true
+					err = nil
+				}
+			}
+			// Write what we got
+			n, err := w.Write(read.b)
+			if n != len(read.b) {
+				return total, io.ErrShortWrite
+			}
+			total += int64(n)
+			if err != nil {
+				return total, err
+			}
+			// Put block back
+			z.blockPool <- read.b
+			if z.lastBlock {
+				break
+			}
+		}
+
+		// Finished file; check checksum + size.
+		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+			z.err = err
+			return total, err
+		}
+		crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+		sum := z.digest.Sum32()
+		if sum != crc32 || isize != z.size {
+			z.err = ErrChecksum
+			return total, z.err
+		}
+		// File is ok; should we attempt reading one more?
+		if !z.multistream {
+			return total, nil
+		}
+
+		// Is there another?
+		err = z.readHeader(false)
+		if err == io.EOF {
+			return total, nil
+		}
+		if err != nil {
+			z.err = err
+			return total, err
+		}
+	}
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+func (z *Reader) Close() error {
+	return z.killReadAhead()
+}
diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go
new file mode 100644
index 000000000..85d14e9cb
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/gzip.go
@@ -0,0 +1,501 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pgzip
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const (
+	defaultBlockSize = 256 << 10
+	tailSize         = 16384
+	defaultBlocks    = 16
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+	NoCompression       = flate.NoCompression
+	BestSpeed           = flate.BestSpeed
+	BestCompression     = flate.BestCompression
+	DefaultCompression  = flate.DefaultCompression
+	ConstantCompression = flate.ConstantCompression
+	HuffmanOnly         = flate.HuffmanOnly
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+	Header
+	w             io.Writer
+	level         int
+	wroteHeader   bool
+	blockSize     int
+	blocks        int
+	currentBuffer []byte
+	prevTail      []byte
+	digest        hash.Hash32
+	size          int
+	closed        bool
+	buf           [10]byte
+	errMu         sync.RWMutex
+	err           error
+	pushedErr     chan struct{}
+	results       chan result
+	dictFlatePool sync.Pool
+	dstPool       sync.Pool
+	wg            sync.WaitGroup
+}
+
+type result struct {
+	result        chan []byte
+	notifyWritten chan struct{}
+}
+
+// Use SetConcurrency to fine-tune the concurrency level if needed.
+//
+// With this you can control the approximate size of your blocks,
+// as well as how many you want to be processing in parallel.
+//
+// The default is SetConcurrency(250000, 16),
+// meaning blocks are split at 250000 bytes and up to 16 blocks
+// can be processed at once before the writer blocks.
+func (z *Writer) SetConcurrency(blockSize, blocks int) error {
+	if blockSize <= tailSize {
+		return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize)
+	}
+	if blocks <= 0 {
+		return errors.New("gzip: blocks cannot be zero or less")
+	}
+	if blockSize == z.blockSize && blocks == z.blocks {
+		return nil
+	}
+	z.blockSize = blockSize
+	z.results = make(chan result, blocks)
+	z.blocks = blocks
+	z.dstPool = sync.Pool{New: func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) }}
+	return nil
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write or Close. The Comment and Name header fields are
+// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO
+// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an
+// error on Write.
+func NewWriter(w io.Writer) *Writer {
+	z, _ := NewWriterLevel(w, DefaultCompression)
+	return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
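+//
+// A short sketch (w stands for any io.Writer):
+//
+//	zw, err := NewWriterLevel(w, BestSpeed)
+//	if err != nil {
+//		// the level was invalid
+//	}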
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+	if level < ConstantCompression || level > BestCompression {
+		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+	}
+	z := new(Writer)
+	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.init(w, level)
+	return z, nil
+}
+
+// This function must be used by goroutines to set an
+// error condition, since z.err access is restricted
+// to the caller's goroutine.
+func (z *Writer) pushError(err error) {
+	z.errMu.Lock()
+	if z.err != nil {
+		z.errMu.Unlock()
+		return
+	}
+	z.err = err
+	close(z.pushedErr)
+	z.errMu.Unlock()
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+	z.wg.Wait()
+	digest := z.digest
+	if digest != nil {
+		digest.Reset()
+	} else {
+		digest = crc32.NewIEEE()
+	}
+	z.Header = Header{OS: 255}
+	z.w = w
+	z.level = level
+	z.digest = digest
+	z.pushedErr = make(chan struct{}, 0)
+	z.results = make(chan result, z.blocks)
+	z.err = nil
+	z.closed = false
+	z.Comment = ""
+	z.Extra = nil
+	z.ModTime = time.Time{}
+	z.wroteHeader = false
+	z.currentBuffer = nil
+	z.buf = [10]byte{}
+	z.prevTail = nil
+	z.size = 0
+	if z.dictFlatePool.New == nil {
+		z.dictFlatePool.New = func() interface{} {
+			f, _ := flate.NewWriterDict(w, level, nil)
+			return f
+		}
+	}
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+	if z.results != nil && !z.closed {
+		close(z.results)
+	}
+	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.init(w, z.level)
+}
+
+// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
+func put2(p []byte, v uint16) {
+	p[0] = uint8(v >> 0)
+	p[1] = uint8(v >> 8)
+}
+
+func put4(p []byte, v uint32) {
+	p[0] = uint8(v >> 0)
+	p[1] = uint8(v >> 8)
+	p[2] = uint8(v >> 16)
+	p[3] = uint8(v >> 24)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+	if len(b) > 0xffff {
+		return errors.New("gzip.Write: Extra data is too large")
+	}
+	put2(z.buf[0:2], uint16(len(b)))
+	_, err := z.w.Write(z.buf[0:2])
+	if err != nil {
+		return err
+	}
+	_, err = z.w.Write(b)
+	return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+	needconv := false
+	for _, v := range s {
+		if v == 0 || v > 0xff {
+			return errors.New("gzip.Write: non-Latin-1 header string")
+		}
+		if v > 0x7f {
+			needconv = true
+		}
+	}
+	if needconv {
+		b := make([]byte, 0, len(s))
+		for _, v := range s {
+			b = append(b, byte(v))
+		}
+		_, err = z.w.Write(b)
+	} else {
+		_, err = io.WriteString(z.w, s)
+	}
+	if err != nil {
+		return err
+	}
+	// GZIP strings are NUL-terminated.
+	z.buf[0] = 0
+	_, err = z.w.Write(z.buf[0:1])
+	return err
+}
+
+// compressCurrent will compress the data currently buffered.
+// This should only be called from the main writer/flush/closer.
+func (z *Writer) compressCurrent(flush bool) {
+	r := result{}
+	r.result = make(chan []byte, 1)
+	r.notifyWritten = make(chan struct{}, 0)
+	select {
+	case z.results <- r:
+	case <-z.pushedErr:
+		return
+	}
+
+	// If the block given is more than twice the block size, split it.
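+	// The recursive call below peels off one blockSize chunk at a time
+	// until the remainder fits in at most two blocks.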
+	c := z.currentBuffer
+	if len(c) > z.blockSize*2 {
+		c = c[:z.blockSize]
+		z.wg.Add(1)
+		go z.compressBlock(c, z.prevTail, r, false)
+		z.prevTail = c[len(c)-tailSize:]
+		z.currentBuffer = z.currentBuffer[z.blockSize:]
+		z.compressCurrent(flush)
+		// Last one flushes if needed
+		return
+	}
+
+	z.wg.Add(1)
+	go z.compressBlock(c, z.prevTail, r, z.closed)
+	if len(c) > tailSize {
+		z.prevTail = c[len(c)-tailSize:]
+	} else {
+		z.prevTail = nil
+	}
+	z.currentBuffer = z.dstPool.Get().([]byte)
+	z.currentBuffer = z.currentBuffer[:0]
+
+	// Wait if flushing
+	if flush {
+		<-r.notifyWritten
+	}
+}
+
+// Returns an error if it has been set.
+// Cannot be used by functions that are from internal goroutines.
+func (z *Writer) checkError() error {
+	z.errMu.RLock()
+	err := z.err
+	z.errMu.RUnlock()
+	return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed to output until
+// the Writer is closed or Flush() is called.
+//
+// The function will return quickly if there are unused buffers.
+// The sent slice (p) is copied, and the caller is free to re-use the buffer
+// when the function returns.
+//
+// Errors that occur during compression will be reported later, and a nil error
+// does not signify that the compression succeeded (since it is most likely still running).
+// That means that the call that returns an error may not be the call that caused it.
+// Only the Flush and Close functions are guaranteed to return any errors up to that point.
+func (z *Writer) Write(p []byte) (int, error) {
+	if err := z.checkError(); err != nil {
+		return 0, err
+	}
+	// Write the GZIP header lazily.
+	if !z.wroteHeader {
+		z.wroteHeader = true
+		z.buf[0] = gzipID1
+		z.buf[1] = gzipID2
+		z.buf[2] = gzipDeflate
+		z.buf[3] = 0
+		if z.Extra != nil {
+			z.buf[3] |= 0x04
+		}
+		if z.Name != "" {
+			z.buf[3] |= 0x08
+		}
+		if z.Comment != "" {
+			z.buf[3] |= 0x10
+		}
+		put4(z.buf[4:8], uint32(z.ModTime.Unix()))
+		if z.level == BestCompression {
+			z.buf[8] = 2
+		} else if z.level == BestSpeed {
+			z.buf[8] = 4
+		} else {
+			z.buf[8] = 0
+		}
+		z.buf[9] = z.OS
+		var n int
+		var err error
+		n, err = z.w.Write(z.buf[0:10])
+		if err != nil {
+			z.pushError(err)
+			return n, err
+		}
+		if z.Extra != nil {
+			err = z.writeBytes(z.Extra)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		if z.Name != "" {
+			err = z.writeString(z.Name)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		if z.Comment != "" {
+			err = z.writeString(z.Comment)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		// Start receiving data from compressors
+		go func() {
+			listen := z.results
+			for {
+				r, ok := <-listen
+				// If closed, we are finished.
+				if !ok {
+					return
+				}
+				buf := <-r.result
+				n, err := z.w.Write(buf)
+				if err != nil {
+					z.pushError(err)
+					close(r.notifyWritten)
+					return
+				}
+				if n != len(buf) {
+					z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf)))
+					close(r.notifyWritten)
+					return
+				}
+				z.dstPool.Put(buf)
+				close(r.notifyWritten)
+			}
+		}()
+		z.currentBuffer = make([]byte, 0, z.blockSize)
+	}
+	q := p
+	for len(q) > 0 {
+		length := len(q)
+		if length+len(z.currentBuffer) > z.blockSize {
+			length = z.blockSize - len(z.currentBuffer)
+		}
+		z.digest.Write(q[:length])
+		z.currentBuffer = append(z.currentBuffer, q[:length]...)
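+		// Once a full block has accumulated, hand it off to a parallel
+		// compressor goroutine.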
+ if len(z.currentBuffer) >= z.blockSize { + z.compressCurrent(false) + if err := z.checkError(); err != nil { + return len(p) - len(q) - length, err + } + } + z.size += length + q = q[length:] + } + return len(p), z.checkError() +} + +// Step 1: compresses buffer to buffer +// Step 2: send writer to channel +// Step 3: Close result channel to indicate we are done +func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) { + defer func() { + close(r.result) + z.wg.Done() + }() + buf := z.dstPool.Get().([]byte) + dest := bytes.NewBuffer(buf[:0]) + + compressor := z.dictFlatePool.Get().(*flate.Writer) + compressor.ResetDict(dest, prevTail) + compressor.Write(p) + + err := compressor.Flush() + if err != nil { + z.pushError(err) + return + } + if closed { + err = compressor.Close() + if err != nil { + z.pushError(err) + return + } + } + z.dictFlatePool.Put(compressor) + // Read back buffer + buf = dest.Bytes() + r.result <- buf +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + if !z.wroteHeader { + _, err := z.Write(nil) + if err != nil { + return err + } + } + // We send current block to compression + z.compressCurrent(true) + + return z.checkError() +} + +// UncompressedSize will return the number of bytes written. +// pgzip only, not a function in the official gzip package. +func (z *Writer) UncompressedSize() int { + return z.size +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if err := z.checkError(); err != nil { + return err + } + } + z.compressCurrent(true) + if err := z.checkError(); err != nil { + return err + } + close(z.results) + put4(z.buf[0:4], z.digest.Sum32()) + put4(z.buf[4:8], uint32(z.size)) + _, err := z.w.Write(z.buf[0:8]) + if err != nil { + z.pushError(err) + return err + } + return nil +} -- cgit v1.2.3-54-g00ecf