author     dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>  2021-06-10 06:28:23 +0000
committer  GitHub <noreply@github.com>  2021-06-10 06:28:23 +0000
commit     b5890fc86bdaacccf0e523c5c2774e1fd1c675f6
tree       3ff85852ced121567381c9db6af6edcde75be124 /vendor/github.com/klauspost
parent     ea39735845655aaf77de577a35557a0fb003514f
Bump github.com/containers/storage from 1.32.1 to 1.32.2
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.32.1 to 1.32.2.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.32.1...v1.32.2)
---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
Diffstat (limited to 'vendor/github.com/klauspost')
14 files changed, 164 insertions, 140 deletions
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6cea054d2..e30af505c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -144,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } cSize = 1 case blockTypeCompressed: - if debug { + if debugDecoder { println("Data size on stream:", cSize) } b.RLESize = 0 @@ -153,7 +153,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { maxSize = int(windowSize) } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debug { + if debugDecoder { printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) } return ErrCompressedSizeTooBig @@ -179,7 +179,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { - if debug { + if debugDecoder { println("Reading block:", err, "(", cSize, ")", len(b.data)) printf("%T", br) } @@ -249,7 +249,7 @@ func (b *blockDec) startDecoder() { b: b.dst, err: err, } - if debug { + if debugDecoder { println("Decompressed to", len(b.dst), "bytes, error:", err) } b.result <- o @@ -264,7 +264,7 @@ func (b *blockDec) startDecoder() { default: panic("Invalid block type") } - if debug { + if debugDecoder { println("blockDec: Finished block") } } @@ -297,7 +297,7 @@ func (b *blockDec) decodeBuf(hist *history) error { b.dst = hist.b hist.b = nil err := b.decodeCompressed(hist) - if debug { + if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } hist.b = b.dst @@ -390,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { in = in[5:] } } - if debug { + if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } var literals []byte @@ -428,7 +428,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { literals[i] = v } in = in[1:] - if debug { + if debugDecoder { printf("Found %d RLE compressed literals\n", litRegenSize) } case literalsBlockTreeless: @@ -439,7 +439,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Store compressed literals, so we defer decoding until we get history. 
literals = in[:litCompSize] in = in[litCompSize:] - if debug { + if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } case literalsBlockCompressed: @@ -481,7 +481,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { if len(literals) != litRegenSize { return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } - if debug { + if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } @@ -532,12 +532,12 @@ func (b *blockDec) decodeCompressed(hist *history) error { br := byteReader{b: in, off: 0} compMode := br.Uint8() br.advance(1) - if debug { + if debugDecoder { printf("Compression modes: 0b%b", compMode) } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debug { + if debugDecoder { println("Table", tableIndex(i), "is", mode) } var seq *sequenceDec @@ -568,7 +568,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { } dec.setRLE(symb) seq.fse = dec - if debug { + if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: @@ -584,7 +584,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { println("Transform table error:", err) return err } - if debug { + if debugDecoder { println("Read table ok", "symbolLen:", dec.symbolLen) } seq.fse = dec @@ -652,7 +652,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { if huff != nil { hist.huffTree = huff } - if debug { + if debugDecoder { println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") } @@ -669,7 +669,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { if err != nil { return err } - if debug { + if debugDecoder { println("History merged ok") } br := &bitReader{} @@ -728,7 +728,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { } hist.append(b.dst) hist.recentOffsets = seqs.prevOffset - if debug { + if debugDecoder { println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index e1be092f3..3df185ee4 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -156,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) { switch { case inBits < 5: lh |= (uint64(regenLen) << 3) | (1 << 60) - if debug { + if debugEncoder { got := int(lh>>3) & 0xff if got != regenLen { panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) @@ -184,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { lh |= 1 << 2 } lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debug { + if debugEncoder { const mmask = (1 << 24) - 1 n := (lh >> 4) & mmask if int(n&1023) != inLen { @@ -312,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) { bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output[:0]) b.output = append(b.output, a...) - if debug { + if debugEncoder { println("Adding RAW block, length", len(a), "last:", b.last) } } @@ -325,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { bh.setType(blockTypeRaw) dst = bh.appendTo(dst) dst = append(dst, src...) 
- if debug { + if debugEncoder { println("Adding RAW block, length", len(src), "last:", b.last) } return dst @@ -339,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { // Don't compress extremely small blocks if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -371,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { switch err { case huff0.ErrIncompressible: - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -379,7 +379,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { b.output = append(b.output, lits...) return nil case huff0.ErrUseRLE: - if debug { + if debugEncoder { println("Adding RLE block, length", len(lits)) } bh.setType(blockTypeRLE) @@ -396,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { bh.setType(blockTypeCompressed) var lh literalsHeader if reUsed { - if debug { + if debugEncoder { println("Reused tree, compressed to", len(out)) } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) @@ -517,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals...) - if debug { + if debugEncoder { println("Adding literals RAW, length", len(b.literals)) } case huff0.ErrUseRLE: @@ -525,22 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals[0]) - if debug { + if debugEncoder { println("Adding literals RLE") } case nil: // Compressed litLen... if reUsed { - if debug { + if debugEncoder { println("reused tree") } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("new tree, size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) - if debug { + if debugEncoder { _, _, err := huff0.ReadTable(out, nil) if err != nil { panic(err) @@ -548,18 +548,18 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } } lh.setSizes(len(out), len(b.literals), single) - if debug { + if debugEncoder { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) } b.output = lh.appendTo(b.output) b.output = append(b.output, out...) 
b.litEnc.Reuse = huff0.ReusePolicyAllow - if debug { + if debugEncoder { println("Adding literals compressed") } default: - if debug { + if debugEncoder { println("Adding literals ERROR:", err) } return err @@ -577,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { n := len(b.sequences) - 0x7f00 b.output = append(b.output, 255, uint8(n), uint8(n>>8)) } - if debug { + if debugEncoder { println("Encoding", len(b.sequences), "sequences") } b.genCodes() @@ -611,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { nSize = nSize + (nSize+2*8*16)>>4 switch { case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debug { + if debugEncoder { println("Using predefined", predefSize>>3, "<=", nSize>>3) } return preDef, compModePredefined case prevSize <= nSize: - if debug { + if debugEncoder { println("Using previous", prevSize>>3, "<=", nSize>>3) } return prev, compModeRepeat default: - if debug { + if debugEncoder { println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) } @@ -634,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if llEnc.useRLE { mode |= uint8(compModeRLE) << 6 llEnc.setRLE(b.sequences[0].llCode) - if debug { + if debugEncoder { println("llEnc.useRLE") } } else { @@ -645,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if ofEnc.useRLE { mode |= uint8(compModeRLE) << 4 ofEnc.setRLE(b.sequences[0].ofCode) - if debug { + if debugEncoder { println("ofEnc.useRLE") } } else { @@ -657,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if mlEnc.useRLE { mode |= uint8(compModeRLE) << 2 mlEnc.setRLE(b.sequences[0].mlCode) - if debug { + if debugEncoder { println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) } } else { @@ -666,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { mode |= uint8(m) << 2 } b.output = append(b.output, mode) - if debug { + if debugEncoder { printf("Compression modes: 0b%b", mode) } b.output, err = llEnc.writeCount(b.output) @@ -786,7 +786,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { // Size is output minus block header. bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debug { + if debugEncoder { println("Rewriting block header", bh) } _ = bh.appendTo(b.output[bhOffset:bhOffset]) diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 17e820a6a..aab71c6cf 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -91,7 +91,7 @@ func (r *readerWrapper) readSmall(n int) ([]byte, error) { if err == io.EOF { return nil, io.ErrUnexpectedEOF } - if debug { + if debugDecoder { println("readSmall: got", n2, "want", n, "err", err) } return nil, err diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f593e464b..4d984c3b2 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -113,9 +113,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { // Returns the number of bytes written and any error that occurred. 
// When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } var n int for { if len(d.current.b) > 0 { @@ -138,7 +135,7 @@ func (d *Decoder) Read(p []byte) (int, error) { } } if len(d.current.b) > 0 { - if debug { + if debugDecoder { println("returning", n, "still bytes left:", len(d.current.b)) } // Only return error at end of block @@ -147,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { if d.current.err != nil { d.drainOutput() } - if debug { + if debugDecoder { println("returning", n, d.current.err, len(d.decoders)) } return n, d.current.err @@ -167,20 +164,17 @@ func (d *Decoder) Reset(r io.Reader) error { if r == nil { d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } d.current.flushed = true return nil } - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - - // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { bb2 := bb - if debug { + if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } b := bb2.Bytes() @@ -196,12 +190,18 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.b = dst d.current.err = err d.current.flushed = true - if debug { + if debugDecoder { println("sync decode to", len(dst), "bytes, err:", err) } return nil } + if d.stream == nil { + d.stream = make(chan decodeStream, 1) + d.streamWg.Add(1) + go d.startStreamDecoder(d.stream) + } + // Remove current block. d.current.decodeOutput = decodeOutput{} d.current.err = nil @@ -225,7 +225,7 @@ func (d *Decoder) drainOutput() { d.current.cancel = nil } if d.current.d != nil { - if debug { + if debugDecoder { printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) } d.decoders <- d.current.d @@ -238,7 +238,7 @@ func (d *Decoder) drainOutput() { } for v := range d.current.output { if v.d != nil { - if debug { + if debugDecoder { printf("re-adding decoder %p", v.d) } d.decoders <- v.d @@ -255,9 +255,6 @@ func (d *Decoder) drainOutput() { // The return value n is the number of bytes written. // Any error encountered during the write is also returned. func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } var n int64 for { if len(d.current.b) > 0 { @@ -297,7 +294,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { block := <-d.decoders frame := block.localFrame defer func() { - if debug { + if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil @@ -310,7 +307,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.history.reset() err := frame.reset(&frame.bBuf) if err == io.EOF { - if debug { + if debugDecoder { println("frame reset return EOF") } return dst, nil @@ -355,7 +352,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { return dst, err } if len(frame.bBuf) == 0 { - if debug { + if debugDecoder { println("frame dbuf empty") } break @@ -371,7 +368,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // if no data was available without blocking. 
func (d *Decoder) nextBlock(blocking bool) (ok bool) { if d.current.d != nil { - if debug { + if debugDecoder { printf("re-adding current decoder %p", d.current.d) } d.decoders <- d.current.d @@ -391,7 +388,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { return false } } - if debug { + if debugDecoder { println("got", len(d.current.b), "bytes, error:", d.current.err) } return true @@ -485,7 +482,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { defer d.streamWg.Done() frame := newFrameDec(d.o) for stream := range inStream { - if debug { + if debugDecoder { println("got new stream") } br := readerWrapper{r: stream.r} @@ -493,7 +490,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { for { frame.history.reset() err := frame.reset(&br) - if debug && err != nil { + if debugDecoder && err != nil { println("Frame decoder returned", err) } if err == nil && frame.DictionaryID != nil { @@ -510,7 +507,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { } break } - if debug { + if debugDecoder { println("starting frame decoder") } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index fa25a18d8..a36ae83ef 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -82,7 +82,7 @@ func loadDict(b []byte) (*dict, error) { println("Transform table error:", err) return err } - if debug { + if debugDecoder || debugEncoder { println("Read table ok", "symbolLen:", dec.symbolLen) } // Set decoders as predefined so they aren't reused. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index dc1eed5f0..b7d4b9004 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -132,7 +132,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { } _ = addLiterals - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -274,7 +274,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, best.length) } @@ -412,7 +412,7 @@ encodeLoop: blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[2] = uint32(offset3) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 604954290..eab7b5083 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -138,7 +138,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -204,7 +204,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -264,7 +264,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -553,7 +553,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -656,7 +656,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -724,7 +724,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -787,7 +787,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -1084,7 +1084,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 8629d43d8..96b21b90e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -109,7 +109,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -170,7 +170,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -368,7 +368,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -427,7 +427,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -483,7 +483,7 @@ encodeLoop: s += length + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -677,7 +677,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } @@ -767,7 +767,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -830,7 +830,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -1039,7 +1039,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // If we encoded more than 64K mark all dirty. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index ba4a17e10..2246d286d 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -103,7 +103,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -178,7 +178,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -330,7 +330,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -343,7 +343,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { inputMargin = 8 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debug { + if debugEncoder { if len(src) > maxBlockSize { panic("src too big") } @@ -391,7 +391,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -462,7 +462,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -616,7 +616,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. @@ -696,7 +696,7 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -773,7 +773,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -926,7 +926,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 4871dd03a..ea85548fc 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -245,7 +245,7 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.wg.Add(1) go func(src []byte) { - if debug { + if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) } defer func() { @@ -290,7 +290,7 @@ func (e *Encoder) nextBlock(final bool) error { } switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } blk.encodeRaw(src) @@ -313,7 +313,7 @@ func (e *Encoder) nextBlock(final bool) error { // // The Copy function uses ReaderFrom if available. func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debug { + if debugEncoder { println("Using ReadFrom") } @@ -336,20 +336,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { switch err { case io.EOF: e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debug { + if debugEncoder { println("ReadFrom: got EOF final block:", len(e.state.filling)) } return n, nil case nil: default: - if debug { + if debugEncoder { println("ReadFrom: got error:", err) } e.state.err = err return n, err } if len(src) > 0 { - if debug { + if debugEncoder { println("ReadFrom: got space left in source:", len(src)) } continue @@ -512,7 +512,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, src) @@ -548,7 +548,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, todo) diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 4dc151213..e8cc9a2c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -78,20 +78,33 @@ func newFrameDec(o decoderOptions) *frameDec { func (d *frameDec) reset(br byteBuffer) error { d.HasCheckSum = false d.WindowSize = 0 - var b []byte + var signature [4]byte for { var err error - b, err = br.readSmall(4) + // Check if we can read more... 
+ b, err := br.readSmall(1) switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF default: return err case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) } - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - if debug { - println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) } // Break if not skippable frame. break @@ -99,28 +112,34 @@ func (d *frameDec) reset(br byteBuffer) error { // Read size to skip b, err = br.readSmall(4) if err != nil { - println("Reading Frame Size", err) + if debugDecoder { + println("Reading Frame Size", err) + } return err } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") err = br.skipN(int(n)) if err != nil { - if debug { + if debugDecoder { println("Reading discarded frame", err) } return err } } - if !bytes.Equal(b, frameMagic) { - println("Got magic numbers: ", b, "want:", frameMagic) + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } return ErrMagicMismatch } // Read Frame_Header_Descriptor fhd, err := br.readByte() if err != nil { - println("Reading Frame_Header_Descriptor", err) + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } return err } d.SingleSegment = fhd&(1<<5) != 0 @@ -135,7 +154,9 @@ func (d *frameDec) reset(br byteBuffer) error { if !d.SingleSegment { wd, err := br.readByte() if err != nil { - println("Reading Window_Descriptor", err) + if debugDecoder { + println("Reading Window_Descriptor", err) + } return err } printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) @@ -153,7 +174,7 @@ func (d *frameDec) reset(br byteBuffer) error { size = 4 } - b, err = br.readSmall(int(size)) + b, err := br.readSmall(int(size)) if err != nil { println("Reading Dictionary_ID", err) return err @@ -167,7 +188,7 @@ func (d *frameDec) reset(br byteBuffer) error { case 4: id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) } - if debug { + if debugDecoder { println("Dict size", size, "ID:", id) } if id > 0 { @@ -191,7 +212,7 @@ func (d *frameDec) reset(br byteBuffer) error { } d.FrameContentSize = 0 if fcsSize > 0 { - b, err = br.readSmall(fcsSize) + b, err := br.readSmall(fcsSize) if err != nil { println("Reading Frame content", err) return err @@ -209,7 +230,7 @@ func (d *frameDec) reset(br byteBuffer) error { d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } - if debug { + if debugDecoder { println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) } } @@ -252,7 +273,7 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. 
func (d *frameDec) next(block *blockDec) error { - if debug { + if debugDecoder { printf("decoding new block %p:%p", block, block.data) } err := block.reset(d.rawInput, d.WindowSize) @@ -263,7 +284,7 @@ func (d *frameDec) next(block *blockDec) error { return err } block.input <- struct{}{} - if debug { + if debugDecoder { println("next block:", block) } d.asyncRunningMu.Lock() @@ -318,12 +339,12 @@ func (d *frameDec) checkCRC() error { } if !bytes.Equal(tmp[:], want) { - if debug { + if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } return ErrCRCMismatch } - if debug { + if debugDecoder { println("CRC ok", tmp[:]) } return nil @@ -344,7 +365,7 @@ func (d *frameDec) initAsync() { if cap(d.decoding) < d.o.concurrent { d.decoding = make(chan *blockDec, d.o.concurrent) } - if debug { + if debugDecoder { h := d.history printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) } @@ -392,7 +413,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { output <- r return } - if debug { + if debugDecoder { println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) d.offset += int64(len(r.b)) } @@ -400,7 +421,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { // Send history to next block select { case next = <-d.decoding: - if debug { + if debugDecoder { println("Sending ", len(d.history.b), "bytes as history") } next.history <- &d.history @@ -438,7 +459,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { output <- r if next == nil { // There was no decoder available, we wait for one now that we have sent to the writer. - if debug { + if debugDecoder { println("Sending ", len(d.history.b), " bytes as history") } next = <-d.decoding @@ -462,7 +483,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if err != nil { break } - if debug { + if debugDecoder { println("next block:", dec) } err = dec.decodeBuf(&d.history) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index c74681b99..b4757ee3f 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -229,7 +229,7 @@ func (s *fseEncoder) setRLE(val byte) { deltaFindState: 0, deltaNbBits: 0, } - if debug { + if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 9d9d1d567..0372b1714 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -203,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { written += int64(n) continue case chunkTypeUncompressedData: - if debug { + if debugEncoder { println("Uncompressed, chunklen", chunkLen) } // Section 4.3. Uncompressed data (chunk type 0x01). @@ -246,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { continue case chunkTypeStreamIdentifier: - if debug { + if debugEncoder { println("stream id", chunkLen, len(snappyMagicBody)) } // Section 4.1. Stream identifier (chunk type 0xff). 
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 1ba308c8b..ef1d49a00 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -15,6 +15,12 @@ import (
 // enable debug printing
 const debug = false
 
+// enable encoding debug printing
+const debugEncoder = debug
+
+// enable decoding debug printing
+const debugDecoder = debug
+
 // Enable extra assertions.
 const debugAsserts = debug || false
 
@@ -82,13 +88,13 @@ var (
 )
 
 func println(a ...interface{}) {
-	if debug {
+	if debug || debugDecoder || debugEncoder {
 		log.Println(a...)
 	}
 }
 
 func printf(format string, a ...interface{}) {
-	if debug {
+	if debug || debugDecoder || debugEncoder {
 		log.Printf(format, a...)
 	}
}
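The bulk of the vendored change is the split of the single `debug` constant into separate `debugEncoder` and `debugDecoder` constants, as defined in the zstd.go hunk above. A minimal standalone sketch of that compile-time gating pattern is below; the `debugln` and `decodeBlock` names are illustrative only, not the library's actual API.

```go
package main

import "log"

// Compile-time debug switches, mirroring the pattern the vendored zstd
// package adopts: a master constant plus per-direction constants derived
// from it, so encoder and decoder tracing can be toggled independently.
const (
	debug        = false
	debugEncoder = debug // gates encoder-side tracing
	debugDecoder = debug // gates decoder-side tracing
)

// debugln prints only when some debug switch is enabled; with all
// constants false the branch is dead code and compiles away.
func debugln(a ...interface{}) {
	if debug || debugDecoder || debugEncoder {
		log.Println(a...)
	}
}

// decodeBlock is a hypothetical call site showing per-direction gating.
func decodeBlock(b []byte) {
	if debugDecoder {
		debugln("decoding block of", len(b), "bytes")
	}
	// ... actual decoding would happen here ...
}

func main() {
	decodeBlock(make([]byte, 128))
}
```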