author | Daniel J Walsh <dwalsh@redhat.com> | 2021-05-27 12:34:22 -0400
---|---|---
committer | Daniel J Walsh <dwalsh@redhat.com> | 2021-05-27 12:34:26 -0400
commit | c9609d820b6fd4082eecf7904178c361a22de6e9 (patch) |
tree | f22728d168b0785b5ee0b80ec3d3f238aef41a31 /vendor/github.com |
parent | cd1f99d063cf8bb4efdb22d2f56f1aef73ff4ba0 (diff) |
Vendor in containers/storage v1.32.1
In theory, this should fix the awful flake we have
been suffering from.
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
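The flake comes down to swallowed errors: the stores' ReloadIfChanged() returned nil even when checking the lock file failed, so callers carried on with stale state. Below is a minimal, self-contained sketch of the before/after behavior; checkModified and load are hypothetical stand-ins for lockfile.Modified() and the store's Load(), not the real API.

package main

import (
	"errors"
	"fmt"
)

var errLockStat = errors.New("stat lock file: transient failure")

// checkModified stands in for lockfile.Modified(), which can fail transiently.
func checkModified() (bool, error) { return false, errLockStat }

// load stands in for the store's Load().
func load() error { return nil }

// reloadOld mirrors the pre-v1.32.1 ReloadIfChanged: the error from
// Modified() is silently dropped, so callers proceed as if nothing failed.
func reloadOld() error {
	modified, err := checkModified()
	if err == nil && modified {
		return load()
	}
	return nil // BUG: err is lost here
}

// reloadNew mirrors the v1.32.1 fix: the error is propagated.
func reloadNew() error {
	modified, err := checkModified()
	if err == nil && modified {
		return load()
	}
	return err
}

func main() {
	fmt.Println("old:", reloadOld()) // old: <nil> — failure hidden
	fmt.Println("new:", reloadNew()) // new: stat lock file: transient failure
}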
Diffstat (limited to 'vendor/github.com')
17 files changed, 363 insertions(+), 212 deletions(-)
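Two themes dominate the diff below: error propagation in containers/storage (ReloadIfChanged and the zstd readSmall path now return the underlying error) and a token-encoding rework in klauspost/compress's flate package, where the offset code is cached in bits 16+ of each match token and offsetCombined packs the extra-bit count and base offset into one uint32. The following is a hedged sketch of that packing, using illustrative values for distance code 19; the constants mirror the diff, while the worked numbers are this example's own assumptions.

package main

import "fmt"

const matchOffsetOnlyMask = 0xffff // low 16 bits of a token's offset field hold the raw offset

func main() {
	// AddMatch in the diff caches the offset code alongside the offset:
	//   xoffset |= oCode << 16
	const oCode, xoffset = 19, 770 // code 19 covers (0-based) offsets 0x300..0x3ff
	tokenOffset := uint32(xoffset) | uint32(oCode)<<16

	// The init() loop in the diff packs "extra bits << 16 | base offset":
	combined := uint32(8)<<16 | 0x300 // distance code 19: 8 extra bits, base 0x300

	// writeTokens then recovers everything with one shift and one mask,
	// instead of a second table lookup per match:
	gotCode := tokenOffset >> 16
	gotOffset := tokenOffset & matchOffsetOnlyMask
	extra := gotOffset - combined&0xffff // payload of the 8 extra bits

	fmt.Println(gotCode, gotOffset, extra) // 19 770 2
}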
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 359c41089..96cd6ee1e 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.32.0
+1.32.1
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 6407e9a83..b4f773f2b 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -626,5 +626,5 @@ func (r *containerStore) ReloadIfChanged() error {
 	if err == nil && modified {
 		return r.Load()
 	}
-	return nil
+	return err
 }
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 3d720cde2..e7ca56e64 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/google/go-intervals v0.0.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/json-iterator/go v1.1.11
-	github.com/klauspost/compress v1.12.2
+	github.com/klauspost/compress v1.12.3
 	github.com/klauspost/pgzip v1.2.5
 	github.com/mattn/go-shellwords v1.0.11
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 91403a201..5373d0597 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -383,8 +383,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
-github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index c7b968e05..bca25a65b 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -810,5 +810,5 @@ func (r *imageStore) ReloadIfChanged() error {
 	if err == nil && modified {
 		return r.Load()
 	}
-	return nil
+	return err
 }
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 1ed265d5d..9b05474bb 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1156,48 +1156,51 @@ func (r *layerStore) deleteInternal(id string) error {
 	}
 	id = layer.ID
 	err := r.driver.Remove(id)
-	if err == nil {
-		os.Remove(r.tspath(id))
-		os.RemoveAll(r.datadir(id))
-		delete(r.byid, id)
-		for _, name := range layer.Names {
-			delete(r.byname, name)
+	if err != nil {
+		return err
+	}
+
+	os.Remove(r.tspath(id))
+	os.RemoveAll(r.datadir(id))
+	delete(r.byid, id)
+	for _, name := range layer.Names {
+		delete(r.byname, name)
+	}
+	r.idindex.Delete(id)
+	mountLabel := layer.MountLabel
+	if layer.MountPoint != "" {
+		delete(r.bymount, layer.MountPoint)
+	}
+	r.deleteInDigestMap(id)
+	toDeleteIndex := -1
+	for i, candidate := range r.layers {
+		if candidate.ID == id {
+			toDeleteIndex = i
+			break
 		}
-		r.idindex.Delete(id)
-		mountLabel := layer.MountLabel
-		if layer.MountPoint != "" {
-			delete(r.bymount, layer.MountPoint)
+	}
+	if toDeleteIndex != -1 {
+		// delete the layer at toDeleteIndex
+		if toDeleteIndex == len(r.layers)-1 {
+			r.layers = r.layers[:len(r.layers)-1]
+		} else {
+			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
 		}
-		r.deleteInDigestMap(id)
-		toDeleteIndex := -1
-		for i, candidate := range r.layers {
-			if candidate.ID == id {
-				toDeleteIndex = i
+	}
+	if mountLabel != "" {
+		var found bool
+		for _, candidate := range r.layers {
+			if candidate.MountLabel == mountLabel {
+				found = true
 				break
 			}
 		}
-		if toDeleteIndex != -1 {
-			// delete the layer at toDeleteIndex
-			if toDeleteIndex == len(r.layers)-1 {
-				r.layers = r.layers[:len(r.layers)-1]
-			} else {
-				r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
-			}
-		}
-		if mountLabel != "" {
-			var found bool
-			for _, candidate := range r.layers {
-				if candidate.MountLabel == mountLabel {
-					found = true
-					break
-				}
-			}
-			if !found {
-				label.ReleaseLabel(mountLabel)
-			}
+		if !found {
+			label.ReleaseLabel(mountLabel)
 		}
 	}
-	return err
+
+	return nil
 }
 
 func (r *layerStore) deleteInDigestMap(id string) {
@@ -1777,7 +1780,7 @@ func (r *layerStore) ReloadIfChanged() error {
 	if err == nil && modified {
 		return r.Load()
 	}
-	return nil
+	return err
 }
 
 func closeAll(closes ...func() error) (rErr error) {
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 759407c63..d6d547c64 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -788,6 +788,15 @@ func (s *store) load() error {
 	}
 	s.containerStore = rcs
 
+	for _, store := range driver.AdditionalImageStores() {
+		gipath := filepath.Join(store, driverPrefix+"images")
+		ris, err := newROImageStore(gipath)
+		if err != nil {
+			return err
+		}
+		s.roImageStores = append(s.roImageStores, ris)
+	}
+
 	s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
 	if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
 		return err
@@ -910,22 +919,10 @@ func (s *store) ImageStore() (ImageStore, error) {
 // Store. Accessing these stores directly will bypass locking and
 // synchronization, so it is not a part of the exported Store interface.
 func (s *store) ROImageStores() ([]ROImageStore, error) {
-	if len(s.roImageStores) != 0 {
-		return s.roImageStores, nil
-	}
-	driver, err := s.getGraphDriver()
-	if err != nil {
-		return nil, err
-	}
-	driverPrefix := s.graphDriverName + "-"
-	for _, store := range driver.AdditionalImageStores() {
-		gipath := filepath.Join(store, driverPrefix+"images")
-		ris, err := newROImageStore(gipath)
-		if err != nil {
-			return nil, err
-		}
-		s.roImageStores = append(s.roImageStores, ris)
+	if s.imageStore == nil {
+		return nil, ErrLoadError
 	}
+
 	return s.roImageStores, nil
 }
@@ -2655,8 +2652,13 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
 		return "", err
 	}
 
+	modified, err := s.graphLock.Modified()
+	if err != nil {
+		return "", err
+	}
+
 	/* We need to make sure the home mount is present when the Mount is done. */
-	if s.graphLock.TouchedSince(s.lastLoaded) {
+	if modified {
 		s.graphDriver = nil
 		s.layerStore = nil
 		s.graphDriver, err = s.getGraphDriver()
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 40b5802de..5283ac5a5 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -644,7 +644,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).store
 	case level == ConstantCompression:
-		d.w.logNewTablePenalty = 8
+		d.w.logNewTablePenalty = 10
 		d.window = make([]byte, 32<<10)
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).storeHuff
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 678f08105..347ac2c90 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -45,7 +45,7 @@ const (
 	bTableBits  = 17             // Bits used in the big tables
 	bTableSize  = 1 << bTableBits // Size of the table
 
-	allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history.
+	allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
 	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
 )
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index db54be139..3ad5e9807 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -6,6 +6,7 @@ package flate
 
 import (
 	"encoding/binary"
+	"fmt"
 	"io"
 )
 
@@ -27,7 +28,7 @@ const (
 	// after which bytes are flushed to the writer.
 	// Should preferably be a multiple of 6, since
 	// we accumulate 6 bytes between writes to the buffer.
-	bufferFlushSize = 240
+	bufferFlushSize = 246
 
 	// bufferSize is the actual output byte buffer size.
 	// It must have additional headroom for a flush
@@ -59,19 +60,31 @@ var offsetExtraBits = [64]int8{
 	14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
 }
 
-var offsetBase = [64]uint32{
-	/* normal deflate */
-	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
-	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
-	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
-	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
-	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
-	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+var offsetCombined = [32]uint32{}
 
-	/* extended window */
-	0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
-	0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
-	0x100000, 0x180000, 0x200000, 0x300000,
+func init() {
+	var offsetBase = [64]uint32{
+		/* normal deflate */
+		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+		/* extended window */
+		0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+		0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+		0x100000, 0x180000, 0x200000, 0x300000,
+	}
+
+	for i := range offsetCombined[:] {
+		// Don't use extended window values...
+		if offsetBase[i] > 0x006000 {
+			continue
+		}
+		offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+	}
 }
 
 // The odd order in which the codegen code sizes are written.
@@ -88,15 +101,16 @@ type huffmanBitWriter struct {
 	bits            uint64
 	nbits           uint16
 	nbytes          uint8
+	lastHuffMan     bool
 	literalEncoding *huffmanEncoder
+	tmpLitEncoding  *huffmanEncoder
 	offsetEncoding  *huffmanEncoder
 	codegenEncoding *huffmanEncoder
 	err             error
 	lastHeader      int
 	// Set between 0 (reused block can be up to 2x the size)
 	logNewTablePenalty uint
-	lastHuffMan        bool
-	bytes              [256]byte
+	bytes              [256 + 8]byte
 	literalFreq        [lengthCodesStart + 32]uint16
 	offsetFreq         [32]uint16
 	codegenFreq        [codegenCodeCount]uint16
@@ -128,6 +142,7 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
 	return &huffmanBitWriter{
 		writer:          w,
 		literalEncoding: newHuffmanEncoder(literalCount),
+		tmpLitEncoding:  newHuffmanEncoder(literalCount),
 		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
 		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
 	}
@@ -745,9 +760,31 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 	offs := oeCodes[:32]
 	lengths := leCodes[lengthCodesStart:]
 	lengths = lengths[:32]
+
+	// Go 1.16 LOVES having these on stack.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
 	for _, t := range tokens {
 		if t < matchType {
-			w.writeCode(lits[t.literal()])
+			//w.writeCode(lits[t.literal()])
+			c := lits[t.literal()]
+			bits |= uint64(c.code) << nbits
+			nbits += c.len
+			if nbits >= 48 {
+				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
 			continue
 		}
 
@@ -759,38 +796,99 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		} else {
 			// inlined
 			c := lengths[lengthCode&31]
-			w.bits |= uint64(c.code) << w.nbits
-			w.nbits += c.len
-			if w.nbits >= 48 {
-				w.writeOutBits()
+			bits |= uint64(c.code) << nbits
+			nbits += c.len
+			if nbits >= 48 {
+				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
 			}
 		}
 
 		extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
 		if extraLengthBits > 0 {
+			//w.writeBits(extraLength, extraLengthBits)
 			extraLength := int32(length - lengthBase[lengthCode&31])
-			w.writeBits(extraLength, extraLengthBits)
+			bits |= uint64(extraLength) << nbits
+			nbits += extraLengthBits
+			if nbits >= 48 {
+				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
 		}
 		// Write the offset
 		offset := t.offset()
-		offsetCode := offsetCode(offset)
+		offsetCode := offset >> 16
+		offset &= matchOffsetOnlyMask
 		if false {
 			w.writeCode(offs[offsetCode&31])
 		} else {
 			// inlined
-			c := offs[offsetCode&31]
-			w.bits |= uint64(c.code) << w.nbits
-			w.nbits += c.len
-			if w.nbits >= 48 {
-				w.writeOutBits()
+			c := offs[offsetCode]
+			bits |= uint64(c.code) << nbits
+			nbits += c.len
+			if nbits >= 48 {
+				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
 			}
 		}
-		extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
-		if extraOffsetBits > 0 {
-			extraOffset := int32(offset - offsetBase[offsetCode&63])
-			w.writeBits(extraOffset, extraOffsetBits)
+		offsetComb := offsetCombined[offsetCode]
+		if offsetComb > 1<<16 {
+			//w.writeBits(extraOffset, extraOffsetBits)
+			bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+			nbits += uint16(offsetComb >> 16)
+			if nbits >= 48 {
+				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
 		}
 	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
 	if deferEOB {
 		w.writeCode(leCodes[endBlockMarker])
 	}
@@ -825,13 +923,28 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 		}
 	}
 
+	// Fill is rarely better...
+	const fill = false
+	const numLiterals = endBlockMarker + 1
+	const numOffsets = 1
+
 	// Add everything as literals
 	// We have to estimate the header size.
 	// Assume header is around 70 bytes:
 	// https://stackoverflow.com/a/25454430
 	const guessHeaderSizeBits = 70 * 8
-	estBits := histogramSize(input, w.literalFreq[:], !eof && !sync)
-	estBits += w.lastHeader + len(input)/32
+	histogram(input, w.literalFreq[:numLiterals], fill)
+	w.literalFreq[endBlockMarker] = 1
+	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+	if fill {
+		// Clear fill...
+		for i := range w.literalFreq[:numLiterals] {
+			w.literalFreq[i] = 0
+		}
+		histogram(input, w.literalFreq[:numLiterals], false)
+	}
+	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+	estBits += w.lastHeader
 	if w.lastHeader == 0 {
 		estBits += guessHeaderSizeBits
 	}
@@ -839,33 +952,31 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 
 	// Store bytes, if we don't get a reasonable improvement.
 	ssize, storable := w.storedSize(input)
-	if storable && ssize < estBits {
+	if storable && ssize <= estBits {
 		w.writeStoredHeader(len(input), eof)
 		w.writeBytes(input)
 		return
 	}
 
-	reuseSize := 0
 	if w.lastHeader > 0 {
-		reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256])
+		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
 
 		if estBits < reuseSize {
+			if debugDeflate {
+				//fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+			}
 			// We owe an EOB
 			w.writeCode(w.literalEncoding.codes[endBlockMarker])
 			w.lastHeader = 0
+		} else if debugDeflate {
+			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
 		}
 	}
 
-	const numLiterals = endBlockMarker + 1
-	const numOffsets = 1
+	count := 0
 	if w.lastHeader == 0 {
-		if !eof && !sync {
-			// Generate a slightly suboptimal tree that can be used for all.
-			fillHist(w.literalFreq[:numLiterals])
-		}
-		w.literalFreq[endBlockMarker] = 1
-		w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
-
+		// Use the temp encoding, so swap.
+		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
 		// Generate codegen and codegenFrequencies, which indicates how to encode
 		// the literalEncoding and the offsetEncoding.
 		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
@@ -876,34 +987,47 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
 		w.lastHuffMan = true
 		w.lastHeader, _ = w.headerSize()
+		if debugDeflate {
+			count += w.lastHeader
+			fmt.Println("header:", count/8)
+		}
 	}
 
-	encoding := w.literalEncoding.codes[:257]
+	encoding := w.literalEncoding.codes[:256]
+
+	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
 	for _, t := range input {
 		// Bitwriting inlined, ~30% speedup
 		c := encoding[t]
-		w.bits |= uint64(c.code) << w.nbits
-		w.nbits += c.len
-		if w.nbits >= 48 {
-			bits := w.bits
-			w.bits >>= 48
-			w.nbits -= 48
-			n := w.nbytes
-			binary.LittleEndian.PutUint64(w.bytes[n:], bits)
-			n += 6
-			if n >= bufferFlushSize {
+		bits |= uint64(c.code) << nbits
+		nbits += c.len
+		if debugDeflate {
+			count += int(c.len)
+		}
+		if nbits >= 48 {
+			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+			bits >>= 48
+			nbits -= 48
+			nbytes += 6
+			if nbytes >= bufferFlushSize {
 				if w.err != nil {
-					n = 0
+					nbytes = 0
 					return
 				}
-				w.write(w.bytes[:n])
-				n = 0
+				_, w.err = w.writer.Write(w.bytes[:nbytes])
+				nbytes = 0
 			}
-			w.nbytes = n
 		}
 	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+	if debugDeflate {
+		fmt.Println("wrote", count/8, "bytes")
+	}
 	if eof || sync {
-		w.writeCode(encoding[endBlockMarker])
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
 		w.lastHeader = 0
 		w.lastHuffMan = false
 	}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 0d3445a1c..67b2b3872 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -21,9 +21,13 @@ type hcode struct {
 }
 
 type huffmanEncoder struct {
-	codes     []hcode
-	freqcache []literalNode
-	bitCount  [17]int32
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
 }
 
 type literalNode struct {
@@ -132,6 +136,21 @@ func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
 	return total
 }
 
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.len == 0 {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len)
+		}
+	}
+	return total
+}
+
 // Return the number of literals assigned to each bit size in the Huffman encoding
 //
 // This method is only called when list.length >= 3
@@ -291,12 +310,6 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
 // freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
 // maxBits  The maximum number of bits to use for any literal.
 func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
-	if h.freqcache == nil {
-		// Allocate a reusable buffer with the longest possible frequency table.
-		// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
-		// The largest of these is literalCount, so we allocate for that case.
-		h.freqcache = make([]literalNode, literalCount+1)
-	}
 	list := h.freqcache[:len(freq)+1]
 	// Number of non-zero literals
 	count := 0
@@ -330,10 +343,14 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
 	h.assignEncodingAndSize(bitCount, list)
 }
 
+// atLeastOne clamps the result between 1 and 15.
 func atLeastOne(v float32) float32 {
 	if v < 1 {
 		return 1
 	}
+	if v > 15 {
+		return 15
+	}
 	return v
 }
 
@@ -346,31 +363,12 @@ func fillHist(b []uint16) {
 	}
 }
 
-// histogramSize accumulates a histogram of b in h.
-// An estimated size in bits is returned.
-// len(h) must be >= 256, and h's elements must be all zeroes.
-func histogramSize(b []byte, h []uint16, fill bool) (bits int) {
+func histogram(b []byte, h []uint16, fill bool) {
 	h = h[:256]
 	for _, t := range b {
 		h[t]++
 	}
-	total := len(b)
 	if fill {
-		for _, v := range h {
-			if v == 0 {
-				total++
-			}
-		}
+		fillHist(h)
 	}
-
-	invTotal := 1.0 / float32(total)
-	shannon := float32(0.0)
-	for _, v := range h {
-		if v > 0 {
-			n := float32(v)
-			shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
-		}
-	}
-
-	return int(shannon + 0.99)
 }
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index f9abf606d..eb862d7a9 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,14 +13,17 @@ import (
 )
 
 const (
+	// From top
 	// 2 bits:   type   0 = literal  1=EOF  2=Match  3=Unused
 	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
-	// 22 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
-	lengthShift = 22
-	offsetMask  = 1<<lengthShift - 1
-	typeMask    = 3 << 30
-	literalType = 0 << 30
-	matchType   = 1 << 30
+	// 5 bits    offsetcode
+	// 16 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
+	lengthShift         = 22
+	offsetMask          = 1<<lengthShift - 1
+	typeMask            = 3 << 30
+	literalType         = 0 << 30
+	matchType           = 1 << 30
+	matchOffsetOnlyMask = 0xffff
 )
 
 // The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
@@ -187,7 +190,7 @@ func (t *tokens) indexTokens(in []token) {
 			t.AddLiteral(tok.literal())
 			continue
 		}
-		t.AddMatch(uint32(tok.length()), tok.offset())
+		t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
 	}
 }
 
@@ -232,7 +235,7 @@ func (t *tokens) EstimatedBits() int {
 		for _, v := range t.litHist[:] {
 			if v > 0 {
 				n := float32(v)
-				shannon += -mFastLog2(n*invTotal) * n
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
 			}
 		}
 		// Just add 15 for EOB
@@ -240,7 +243,7 @@ func (t *tokens) EstimatedBits() int {
 	for i, v := range t.extraHist[1 : literalCount-256] {
 		if v > 0 {
 			n := float32(v)
-			shannon += -mFastLog2(n*invTotal) * n
+			shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
 			bits += int(lengthExtraBits[i&31]) * int(v)
 			nMatches += int(v)
 		}
@@ -251,7 +254,7 @@ func (t *tokens) EstimatedBits() int {
 	for i, v := range t.offHist[:offsetCodeCount] {
 		if v > 0 {
 			n := float32(v)
-			shannon += -mFastLog2(n*invTotal) * n
+			shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
 			bits += int(offsetExtraBits[i&31]) * int(v)
 		}
 	}
@@ -270,11 +273,13 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
 			panic(fmt.Errorf("invalid offset: %v", xoffset))
 		}
 	}
+	oCode := offsetCode(xoffset)
+	xoffset |= oCode << 16
 	t.nLits++
-	lengthCode := lengthCodes1[uint8(xlength)] & 31
+
+	t.extraHist[lengthCodes1[uint8(xlength)]]++
+	t.offHist[oCode]++
 	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
-	t.extraHist[lengthCode]++
-	t.offHist[offsetCode(xoffset)&31]++
 	t.n++
 }
 
@@ -286,7 +291,8 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 			panic(fmt.Errorf("invalid offset: %v", xoffset))
 		}
 	}
-	oc := offsetCode(xoffset) & 31
+	oc := offsetCode(xoffset)
+	xoffset |= oc << 16
 	for xlength > 0 {
 		xl := xlength
 		if xl > 258 {
@@ -294,12 +300,11 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 			xl = 258 - baseMatchLength
 		}
 		xlength -= xl
-		xl -= 3
+		xl -= baseMatchLength
 		t.nLits++
-		lengthCode := lengthCodes1[uint8(xl)] & 31
-		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
-		t.extraHist[lengthCode]++
+		t.extraHist[lengthCodes1[uint8(xl)]]++
 		t.offHist[oc]++
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
 		t.n++
 	}
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index e7d7eb0ae..787813fa9 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -16,8 +16,7 @@ Currently the package is heavily optimized for 64 bit processors and will be sig
 
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
 
-Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
-
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
 
 ## Compressor
 
@@ -405,13 +404,28 @@ BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16            749938
 
 This reflects the performance around May 2020, but this may be out of date.
 
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported it can be useful for internal files.
+
+To support the compression and decompression of these files you must register a compressor and decompressor.
+
+It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT
+use the global registration functions. The main reason for this is that 2 registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
+
 # Contributions
 
 Contributions are always welcome. For new features/fixes, remember to add tests and for performance enhancements include benchmarks.
 
-For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files.
-
 For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
 
 This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index b51d922bd..6cea054d2 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -123,12 +123,10 @@ func newBlockDec(lowMem bool) *blockDec {
 
 // Input must be a start of a block and will be at the end of the block when returned.
 func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	b.WindowSize = windowSize
-	tmp := br.readSmall(3)
-	if tmp == nil {
-		if debug {
-			println("Reading block header:", io.ErrUnexpectedEOF)
-		}
-		return io.ErrUnexpectedEOF
+	tmp, err := br.readSmall(3)
+	if err != nil {
+		println("Reading block header:", err)
+		return err
 	}
 	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
 	b.Last = bh&1 != 0
@@ -179,7 +177,6 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 		if cap(b.dst) <= maxSize {
 			b.dst = make([]byte, 0, maxSize+1)
 		}
-		var err error
 		b.data, err = br.readBig(cSize, b.dataStorage)
 		if err != nil {
 			if debug {
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 658ef7838..17e820a6a 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -12,8 +12,8 @@ import (
 
 type byteBuffer interface {
 	// Read up to 8 bytes.
-	// Returns nil if no more input is available.
-	readSmall(n int) []byte
+	// Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+	readSmall(n int) ([]byte, error)
 
 	// Read >8 bytes.
 	// MAY use the destination slice.
@@ -29,17 +29,17 @@ type byteBuffer interface {
 // in-memory buffer
 type byteBuf []byte
 
-func (b *byteBuf) readSmall(n int) []byte {
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
 	if debugAsserts && n > 8 {
 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
 	}
 	bb := *b
 	if len(bb) < n {
-		return nil
+		return nil, io.ErrUnexpectedEOF
 	}
 	r := bb[:n]
 	*b = bb[n:]
-	return r
+	return r, nil
 }
 
 func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
@@ -81,19 +81,22 @@ type readerWrapper struct {
 	tmp [8]byte
 }
 
-func (r *readerWrapper) readSmall(n int) []byte {
+func (r *readerWrapper) readSmall(n int) ([]byte, error) {
 	if debugAsserts && n > 8 {
 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
 	}
 	n2, err := io.ReadFull(r.r, r.tmp[:n])
 	// We only really care about the actual bytes read.
-	if n2 != n {
+	if err != nil {
+		if err == io.EOF {
+			return nil, io.ErrUnexpectedEOF
+		}
 		if debug {
 			println("readSmall: got", n2, "want", n, "err", err)
 		}
-		return nil
+		return nil, err
 	}
-	return r.tmp[:n]
+	return r.tmp[:n], nil
 }
 
 func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 693c5f05d..4dc151213 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -80,9 +80,14 @@ func (d *frameDec) reset(br byteBuffer) error {
 	d.WindowSize = 0
 	var b []byte
 	for {
-		b = br.readSmall(4)
-		if b == nil {
+		var err error
+		b, err = br.readSmall(4)
+		switch err {
+		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
+		default:
+			return err
+		case nil:
 		}
 		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
 			if debug {
@@ -92,14 +97,14 @@ func (d *frameDec) reset(br byteBuffer) error {
 			break
 		}
 		// Read size to skip
-		b = br.readSmall(4)
-		if b == nil {
-			println("Reading Frame Size EOF")
-			return io.ErrUnexpectedEOF
+		b, err = br.readSmall(4)
+		if err != nil {
+			println("Reading Frame Size", err)
+			return err
 		}
 		n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
 		println("Skipping frame with", n, "bytes.")
-		err := br.skipN(int(n))
+		err = br.skipN(int(n))
 		if err != nil {
 			if debug {
 				println("Reading discarded frame", err)
@@ -147,12 +152,11 @@ func (d *frameDec) reset(br byteBuffer) error {
 		if size == 3 {
 			size = 4
 		}
-		b = br.readSmall(int(size))
-		if b == nil {
-			if debug {
-				println("Reading Dictionary_ID", io.ErrUnexpectedEOF)
-			}
-			return io.ErrUnexpectedEOF
+
+		b, err = br.readSmall(int(size))
+		if err != nil {
+			println("Reading Dictionary_ID", err)
+			return err
 		}
 		var id uint32
 		switch size {
@@ -187,10 +191,10 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	d.FrameContentSize = 0
 	if fcsSize > 0 {
-		b := br.readSmall(fcsSize)
-		if b == nil {
-			println("Reading Frame content", io.ErrUnexpectedEOF)
-			return io.ErrUnexpectedEOF
+		b, err = br.readSmall(fcsSize)
+		if err != nil {
+			println("Reading Frame content", err)
+			return err
 		}
 		switch fcsSize {
 		case 1:
@@ -307,10 +311,10 @@ func (d *frameDec) checkCRC() error {
 	tmp[3] = byte(got >> 24)
 
 	// We can overwrite upper tmp now
-	want := d.rawInput.readSmall(4)
-	if want == nil {
-		println("CRC missing?")
-		return io.ErrUnexpectedEOF
+	want, err := d.rawInput.readSmall(4)
+	if err != nil {
+		println("CRC missing?", err)
+		return err
 	}
 
 	if !bytes.Equal(tmp[:], want) {
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index e35a0a2f8..9325b928a 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -13,8 +13,9 @@ import (
 // See https://www.winzip.com/win/en/comp_info.html
 const ZipMethodWinZip = 93
 
-// ZipMethodPKWare is the method number used by PKWARE to indicate Zstandard compression.
-// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.7.TXT
+// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression.
+// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
 const ZipMethodPKWare = 20
 
 var zipReaderPool sync.Pool
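The zip.go change above deprecates ZipMethodPKWare in favor of ZipMethodWinZip (method 93), and the README now documents how to use zstd inside zip archives. Tying the two together, here is a hedged end-to-end sketch of the per-archive registration the README recommends. It assumes the zstd.ZipCompressor/zstd.ZipDecompressor helpers shipped in this version of the package; registering on individual Writer/Reader values rather than via the global zip registration avoids the double-registration panic the README warns about.

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Register on this Writer only, not globally.
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{
		Name:   "hello.txt",
		Method: zstd.ZipMethodWinZip,
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello zstd-in-zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Reading back: register the matching decompressor on this Reader.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read back: %s", data)
}

A single compressor/decompressor pair can safely be shared across many archives, which is also what the README suggests for reusing resources.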