author     Daniel J Walsh <dwalsh@redhat.com>  2019-08-01 03:46:14 -0400
committer  Daniel J Walsh <dwalsh@redhat.com>  2019-08-01 03:46:14 -0400
commit     9d6dce119949d340ebb42becae018a3c11148690 (patch)
tree       c0bb999f9bddbc9379f9f7b159194d35c077fb36 /vendor/github.com/klauspost/compress/zstd
parent     39de184b8bfb14954f77190f0e6127c1ddc363c0 (diff)
download   podman-9d6dce119949d340ebb42becae018a3c11148690.tar.gz
           podman-9d6dce119949d340ebb42becae018a3c11148690.tar.bz2
           podman-9d6dce119949d340ebb42becae018a3c11148690.zip
github.com/containers/storage v1.12.13
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/github.com/klauspost/compress/zstd')
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md  383
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bitreader.go  121
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bitwriter.go  169
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go  708
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockenc.go  754
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blocktype_string.go  85
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytebuf.go  121
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytereader.go  74
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go  437
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder_options.go  66
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go  410
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go  416
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_params.go  154
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go  472
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go  184
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go  473
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/frameenc.go  118
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder.go  337
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_encoder.go  717
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_predefined.go  153
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/hash.go  77
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/history.go  73
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt  22
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md  58
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go  238
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go  13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s  215
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go  76
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go  11
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go  409
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqenc.go  115
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/snappy.go  435
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zstd.go  135
33 files changed, 8229 insertions, 0 deletions
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 000000000..670f98af4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,383 @@
+# zstd
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
+It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
+This package implements a high performance compression algorithm, for now focused on speed.
+
+This package provides [compression](#Compressor) and [decompression](#Decompressor) of Zstandard content.
+Note that custom dictionaries are not supported yet, so if your code relies on that,
+you cannot use the package as-is.
+
+This package is pure Go and without use of "unsafe".
+If a significant speedup can be achieved using "unsafe", it may be added as an option later.
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
+
+
+## Compressor
+
+### Status:
+
+BETA - there may still be subtle bugs, but a wide variety of content has been tested.
+There may still be implementation-specific details, particularly around error handling, that could lead to edge cases.
+
+For now, a high speed (fastest) and a medium-fast (default) compressor have been implemented.
+
+The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+The "Default" compression ration is roughly equivalent to zstd level 3 (default).
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. At a compression ratio comparable to stdlib level 3, it is usually 3x as fast.
+
+Compared to cgo zstd, the speed is around level 3 (default), but the compression is slightly worse, between levels 1 and 2.
+
+
+### Usage
+
+An Encoder can be used either for compressing a stream via the
+`io.WriteCloser` interface it implements, or for multiple independent
+tasks via the `EncodeAll` function.
+For smaller encodes, the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
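+
+A minimal sketch of that pattern (`enc` is the `*Encoder` returned by `NewWriter`; error handling is abbreviated):
+
+```Go
+// CompressTo reuses a single encoder for a new destination.
+func CompressTo(enc *Encoder, in io.Reader, out io.Writer) error {
+	enc.Reset(out) // switch output, reusing internal buffers
+	if _, err := io.Copy(enc, in); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```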
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
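+
+A sketch combining these options (the `SpeedFastest` level constant is an assumption about this version's option names; see `encoder_options.go` for the exact set):
+
+```Go
+// Create a writer with an explicit level and bounded concurrency.
+enc, err := zstd.NewWriter(out,
+	zstd.WithEncoderLevel(zstd.SpeedFastest),
+	zstd.WithEncoderConcurrency(2),
+)
+if err != nil {
+	return err
+}
+defer enc.Close()
+```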
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
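+
+A small sketch of that property, using the package-level `encoder` from the example below and the `decoder` shown in the Decompressor section:
+
+```Go
+// Two independently encoded payloads decode as one combined stream.
+a := encoder.EncodeAll([]byte("hello "), nil)
+b := encoder.EncodeAll([]byte("world"), nil)
+combined, err := decoder.DecodeAll(append(a, b...), nil)
+// combined is now []byte("hello world"); err should be nil.
+```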
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+ return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `gzstd` is gzip standard library. `zstd` is the Datadog cgo library.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file out level insize outsize millis mb/s
+enwik9 zskp 1 1000000000 343833033 5840 163.30
+enwik9 zskp 2 1000000000 317822183 8449 112.87
+enwik9 gzstd 1 1000000000 382578136 13627 69.98
+enwik9 gzstd 3 1000000000 349139651 22344 42.68
+enwik9 zstd 1 1000000000 357416379 4838 197.12
+enwik9 zstd 3 1000000000 313734522 7556 126.21
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file out level insize outsize millis mb/s
+gob-stream zskp 1 1911399616 234981983 5100 357.42
+gob-stream zskp 2 1911399616 208674003 6698 272.15
+gob-stream gzstd 1 1911399616 357382641 14727 123.78
+gob-stream gzstd 3 1911399616 327835097 17005 107.19
+gob-stream zstd 1 1911399616 250787165 4075 447.22
+gob-stream zstd 3 1911399616 208191888 5511 330.77
+
+Highly compressible JSON file. Similar to logs in a lot of ways.
+https://files.klauspost.com/compress/adresser.001.gz
+
+file out level insize outsize millis mb/s
+adresser.001 zskp 1 1073741824 18510122 1477 692.83
+adresser.001 zskp 2 1073741824 19831697 1705 600.59
+adresser.001 gzstd 1 1073741824 47755503 3079 332.47
+adresser.001 gzstd 3 1073741824 40052381 3051 335.63
+adresser.001 zstd 1 1073741824 16135896 994 1030.18
+adresser.001 zstd 3 1073741824 17794465 905 1131.49
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3648168838 33398 244.38
+rawstudio-mint14.tar zskp 2 8558382592 3376721436 50962 160.16
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 84712 96.35
+rawstudio-mint14.tar gzstd 3 8558382592 3740711978 176344 46.28
+rawstudio-mint14.tar zstd 1 8558382592 3607859742 27903 292.51
+rawstudio-mint14.tar zstd 3 8558382592 3341710879 46700 174.77
+
+
+The test data is designed to test archivers in realistic backup scenarios.
+http://mattmahoney.net/dc/10gb.html
+
+file out level insize outsize millis mb/s
+10gb.tar zskp 1 10065157632 4883149814 45715 209.97
+10gb.tar zskp 2 10065157632 4638110010 60970 157.44
+10gb.tar gzstd 1 10065157632 5198296126 97769 98.18
+10gb.tar gzstd 3 10065157632 4932665487 313427 30.63
+10gb.tar zstd 1 10065157632 4940796535 40391 237.65
+10gb.tar zstd 3 10065157632 4638618579 52911 181.42
+
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+file out level insize outsize millis mb/s
+silesia.tar zskp 1 211947520 73025800 1108 182.26
+silesia.tar zskp 2 211947520 67674684 1599 126.41
+silesia.tar gzstd 1 211947520 80007735 2515 80.37
+silesia.tar gzstd 3 211947520 73133380 4259 47.45
+silesia.tar zstd 1 211947520 73513991 933 216.64
+silesia.tar zstd 3 211947520 66793301 1377 146.79
+```
+
+### Converters
+
+As part of the development process a *Snappy* -> *Zstandard* converter was also built.
+
+This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. Note that a single block is not framed.
+
+Conversion is done by converting the stream directly from Snappy without intermediate full decoding.
+Therefore the compression ratio is much lower than what can be achieved by a full decompression
+and recompression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+any errors being generated.
+No CRC value is generated, and not all CRC values of the Snappy stream are checked.
+However, it provides really fast re-compression of Snappy streams.
+
+
+```
+BenchmarkSnappy_ConvertSilesia-8 1 1156001600 ns/op 183.35 MB/s
+Snappy len 103008711 -> zstd len 82687318
+
+BenchmarkSnappy_Enwik9-8 1 6472998400 ns/op 154.49 MB/s
+Snappy len 508028601 -> zstd len 390921079
+```
+
+
+```Go
+	s := zstd.SnappyConverter{}
+	n, err := s.Convert(input, output)
+	if err != nil {
+		return err
+	}
+	fmt.Println("Re-compressed stream to", n, "bytes")
+```
+
+The converter `s` can be reused to avoid allocations, even after errors.
+
+
+## Decompressor
+
+STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+ return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
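+
+A sketch of concurrent buffer decoding on one shared decoder (the goroutine fan-out is illustrative, not part of the API):
+
+```Go
+var dec, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(4))
+
+// DecompressAll decodes each buffer on its own goroutine.
+func DecompressAll(bufs [][]byte) ([][]byte, error) {
+	out := make([][]byte, len(bufs))
+	errs := make(chan error, len(bufs))
+	for i, b := range bufs {
+		go func(i int, b []byte) {
+			var err error
+			out[i], err = dec.DecodeAll(b, nil)
+			errs <- err
+		}(i, b)
+	}
+	for range bufs {
+		if err := <-errs; err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+```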
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
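+
+For example, a minimal sketch using the package-level `decoder` from above (`expectedSize` is a caller-supplied assumption about the decompressed size):
+
+```Go
+// DecompressInto supplies a destination with length 0 and a known capacity.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+	dst := make([]byte, 0, expectedSize)
+	// DecodeAll appends to dst; if the capacity suffices,
+	// no additional allocation should be made.
+	return decoder.DecodeAll(src, dst)
+}
+```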
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+
+
+### Benchmarks
+
+These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the last are smaller inputs.
+
+```
+BenchmarkDecoderSilesia-8 20 642550210 ns/op 329.85 MB/s 3101 B/op 8 allocs/op
+BenchmarkDecoderSilesiaCgo-8 100 384930000 ns/op 550.61 MB/s 451878 B/op 9713 allocs/op
+
+BenchmarkDecoderEnwik9-2 10 3146000080 ns/op 317.86 MB/s 2649 B/op 9 allocs/op
+BenchmarkDecoderEnwik9Cgo-2 20 1905900000 ns/op 524.69 MB/s 1125120 B/op 45785 allocs/op
+
+BenchmarkDecoder_DecodeAll/z000000.zst-8 200 7049994 ns/op 138.26 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000001.zst-8 100000 19560 ns/op 97.49 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000002.zst-8 5000 297599 ns/op 236.99 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000003.zst-8 2000 725502 ns/op 141.17 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000004.zst-8 200000 9314 ns/op 54.54 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000005.zst-8 10000 137500 ns/op 104.72 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000006.zst-8 500 2316009 ns/op 206.06 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000007.zst-8 20000 64499 ns/op 344.90 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000008.zst-8 50000 24900 ns/op 219.56 MB/s 40 B/op 2 allocs/op
+BenchmarkDecoder_DecodeAll/z000009.zst-8 1000 2348999 ns/op 154.01 MB/s 40 B/op 2 allocs/op
+
+BenchmarkDecoder_DecodeAllCgo/z000000.zst-8 500 4268005 ns/op 228.38 MB/s 1228849 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000001.zst-8 100000 15250 ns/op 125.05 MB/s 2096 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000002.zst-8 10000 147399 ns/op 478.49 MB/s 73776 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000003.zst-8 5000 320798 ns/op 319.27 MB/s 139312 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000004.zst-8 200000 10004 ns/op 50.77 MB/s 560 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000005.zst-8 20000 73599 ns/op 195.64 MB/s 19120 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000006.zst-8 1000 1119003 ns/op 426.48 MB/s 557104 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000007.zst-8 20000 103450 ns/op 215.04 MB/s 71296 B/op 9 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000008.zst-8 100000 20130 ns/op 271.58 MB/s 6192 B/op 3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000009.zst-8 2000 1123500 ns/op 322.00 MB/s 368688 B/op 3 allocs/op
+```
+
+This reflects the performance around May 2019, but this may be out of date.
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests and for performance enhancements include benchmarks.
+
+To send files for reproducing errors, use a service like [goobox](https://goobox.io/#/upload) or similar to share your files.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 000000000..15d79d439
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,121 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "io"
+ "math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64 // Maybe use [16]byte, but shifting is awkward.
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ b.fill()
+ b.fill()
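+ // Skip the zero padding and the end-of-stream marker bit in the last byte.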
+ b.bitsRead += 8 - uint8(highBits(uint32(v)))
+ return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+ if n == 0 /*|| b.bitsRead >= 64 */ {
+ return 0
+ }
+ return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+ const regMask = 64 - 1
+ v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+ // Do single re-slice to avoid bounds checks.
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off >= 4 {
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value = (b.value << 8) | uint64(b.in[b.off-1])
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+ return b.off == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than is on the stream.
+func (b *bitReader) overread() bool {
+ return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+ return b.off*8 + 64 - uint(b.bitsRead)
+}
+
+// close will close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+ // Release reference.
+ b.in = nil
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func highBits(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 000000000..303ae90f9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+ bitContainer uint64
+ nBits uint8
+ out []byte
+}
+
+// bitMask16 contains bitmasks for up to 16 bits. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits32NC will add up to 32 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+ v := b.nBits >> 3
+ switch v {
+ case 0:
+ case 1:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ )
+ case 2:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ )
+ case 3:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ )
+ case 4:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ )
+ case 5:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ )
+ case 6:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ )
+ case 7:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ byte(b.bitContainer>>48),
+ )
+ case 8:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ byte(b.bitContainer>>48),
+ byte(b.bitContainer>>56),
+ )
+ default:
+ panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+ }
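+ // Remove the flushed bytes (v bytes = v << 3 bits) from the container.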
+ b.bitContainer >>= v << 3
+ b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+ if b.nBits < 32 {
+ return
+ }
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24))
+ b.nBits -= 32
+ b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+ nbBytes := (b.nBits + 7) >> 3
+ for i := uint8(0); i < nbBytes; i++ {
+ b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+ }
+ b.nBits = 0
+ b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+ // End mark
+ b.addBits16Clean(1, 1)
+ // flush until next byte.
+ b.flushAlign()
+ return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+ b.bitContainer = 0
+ b.nBits = 0
+ b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
new file mode 100644
index 000000000..aca1cb85d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -0,0 +1,708 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+ blockTypeRaw blockType = iota
+ blockTypeRLE
+ blockTypeCompressed
+ blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+ literalsBlockRaw literalsBlockType = iota
+ literalsBlockRLE
+ literalsBlockCompressed
+ literalsBlockTreeless
+)
+
+const (
+ // maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+ maxCompressedBlockSize = 128 << 10
+
+ // Maximum possible block size (all Raw+Uncompressed).
+ maxBlockSize = (1 << 21) - 1
+
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+ maxCompressedLiteralSize = 1 << 18
+ maxRLELiteralSize = 1 << 20
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
+
+ // We support slightly less than the reference decoder to be able to
+ // use ints on 32 bit archs.
+ maxOffsetBits = 30
+)
+
+var (
+ huffDecoderPool = sync.Pool{New: func() interface{} {
+ return &huff0.Scratch{}
+ }}
+
+ fseDecoderPool = sync.Pool{New: func() interface{} {
+ return &fseDecoder{}
+ }}
+)
+
+type blockDec struct {
+ // Raw source data of the block.
+ data []byte
+
+ // Destination of the decoded data.
+ dst []byte
+
+ // Buffer for literals data.
+ literalBuf []byte
+
+ // Window size of the block.
+ WindowSize uint64
+ Type blockType
+ RLESize uint32
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Use less memory
+ lowMem bool
+ history chan *history
+ input chan struct{}
+ result chan decodeOutput
+ sequenceBuf []seq
+ tmp [4]byte
+ err error
+}
+
+func (b *blockDec) String() string {
+ if b == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+ b := blockDec{
+ lowMem: lowMem,
+ result: make(chan decodeOutput, 1),
+ input: make(chan struct{}, 1),
+ history: make(chan *history, 1),
+ }
+ go b.startDecoder()
+ return &b
+}
+
+// reset will reset the block.
+// Input must be at the start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+ b.WindowSize = windowSize
+ tmp := br.readSmall(3)
+ if tmp == nil {
+ if debug {
+ println("Reading block header:", io.ErrUnexpectedEOF)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ b.Last = bh&1 != 0
+ b.Type = blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ switch b.Type {
+ case blockTypeReserved:
+ return ErrReservedBlockType
+ case blockTypeRLE:
+ b.RLESize = uint32(cSize)
+ cSize = 1
+ case blockTypeCompressed:
+ if debug {
+ println("Data size on stream:", cSize)
+ }
+ b.RLESize = 0
+ if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+ if debug {
+ printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrCompressedSizeTooBig
+ }
+ default:
+ b.RLESize = 0
+ }
+
+ // Read block data.
+ if cap(b.data) < cSize {
+ if b.lowMem {
+ b.data = make([]byte, 0, cSize)
+ } else {
+ b.data = make([]byte, 0, maxBlockSize)
+ }
+ }
+ if cap(b.dst) <= maxBlockSize {
+ b.dst = make([]byte, 0, maxBlockSize+1)
+ }
+ var err error
+ b.data, err = br.readBig(cSize, b.data[:0])
+ if err != nil {
+ if debug {
+ println("Reading block:", err)
+ }
+ return err
+ }
+ return nil
+}
+
+// sendErr will make the decoder return the provided error on this frame.
+func (b *blockDec) sendErr(err error) {
+ b.Last = true
+ b.Type = blockTypeReserved
+ b.err = err
+ b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+ close(b.input)
+ close(b.history)
+ close(b.result)
+}
+
+// startDecoder runs the block decoder, decoding blocks as input arrives.
+// This will separate output and history.
+func (b *blockDec) startDecoder() {
+ for range b.input {
+ //println("blockDec: Got block input")
+ switch b.Type {
+ case blockTypeRLE:
+ if cap(b.dst) < int(b.RLESize) {
+ if b.lowMem {
+ b.dst = make([]byte, b.RLESize)
+ } else {
+ b.dst = make([]byte, maxBlockSize)
+ }
+ }
+ o := decodeOutput{
+ d: b,
+ b: b.dst[:b.RLESize],
+ err: nil,
+ }
+ v := b.data[0]
+ for i := range o.b {
+ o.b[i] = v
+ }
+ hist := <-b.history
+ hist.append(o.b)
+ b.result <- o
+ case blockTypeRaw:
+ o := decodeOutput{
+ d: b,
+ b: b.data,
+ err: nil,
+ }
+ hist := <-b.history
+ hist.append(o.b)
+ b.result <- o
+ case blockTypeCompressed:
+ b.dst = b.dst[:0]
+ err := b.decodeCompressed(nil)
+ o := decodeOutput{
+ d: b,
+ b: b.dst,
+ err: err,
+ }
+ if debug {
+ println("Decompressed to", len(b.dst), "bytes, error:", err)
+ }
+ b.result <- o
+ case blockTypeReserved:
+ // Used for returning errors.
+ <-b.history
+ b.result <- decodeOutput{
+ d: b,
+ b: nil,
+ err: b.err,
+ }
+ default:
+ panic("Invalid block type")
+ }
+ if debug {
+ println("blockDec: Finished block")
+ }
+ }
+}
+
+// decodeBuf decodes the block into the supplied history.
+// The history is used and updated directly, not fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error {
+ switch b.Type {
+ case blockTypeRLE:
+ if cap(b.dst) < int(b.RLESize) {
+ if b.lowMem {
+ b.dst = make([]byte, b.RLESize)
+ } else {
+ b.dst = make([]byte, maxBlockSize)
+ }
+ }
+ b.dst = b.dst[:b.RLESize]
+ v := b.data[0]
+ for i := range b.dst {
+ b.dst[i] = v
+ }
+ hist.appendKeep(b.dst)
+ return nil
+ case blockTypeRaw:
+ hist.appendKeep(b.data)
+ return nil
+ case blockTypeCompressed:
+ saved := b.dst
+ b.dst = hist.b
+ hist.b = nil
+ err := b.decodeCompressed(hist)
+ if debug {
+ println("Decompressed to total", len(b.dst), "bytes, error:", err)
+ }
+ hist.b = b.dst
+ b.dst = saved
+ return err
+ case blockTypeReserved:
+ // Used for returning errors.
+ return b.err
+ default:
+ panic("Invalid block type")
+ }
+}
+
+// decodeCompressed will start decompressing a block.
+// If no history is supplied, the decoder will decode as much as possible
+// before fetching history from blockDec.history.
+func (b *blockDec) decodeCompressed(hist *history) error {
+ in := b.data
+ delayedHistory := hist == nil
+
+ if delayedHistory {
+ // We must always grab history.
+ defer func() {
+ if hist == nil {
+ <-b.history
+ }
+ }()
+ }
+ // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+ if len(in) < 2 {
+ return ErrBlockTooSmall
+ }
+ litType := literalsBlockType(in[0] & 3)
+ var litRegenSize int
+ var litCompSize int
+ sizeFormat := (in[0] >> 2) & 3
+ var fourStreams bool
+ switch litType {
+ case literalsBlockRaw, literalsBlockRLE:
+ switch sizeFormat {
+ case 0, 2:
+ // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+ litRegenSize = int(in[0] >> 3)
+ in = in[1:]
+ case 1:
+ // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+ litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+ in = in[2:]
+ case 3:
+ // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+ if len(in) < 3 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return ErrBlockTooSmall
+ }
+ litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+ in = in[3:]
+ }
+ case literalsBlockCompressed, literalsBlockTreeless:
+ switch sizeFormat {
+ case 0, 1:
+ // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+ if len(in) < 3 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
+ litRegenSize = int(n & 1023)
+ litCompSize = int(n >> 10)
+ fourStreams = sizeFormat == 1
+ in = in[3:]
+ case 2:
+ fourStreams = true
+ if len(in) < 4 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
+ litRegenSize = int(n & 16383)
+ litCompSize = int(n >> 14)
+ in = in[4:]
+ case 3:
+ fourStreams = true
+ if len(in) < 5 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
+ litRegenSize = int(n & 262143)
+ litCompSize = int(n >> 18)
+ in = in[5:]
+ }
+ }
+ if debug {
+ println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize", litCompSize)
+ }
+ var literals []byte
+ var huff *huff0.Scratch
+ switch litType {
+ case literalsBlockRaw:
+ if len(in) < litRegenSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
+ return ErrBlockTooSmall
+ }
+ literals = in[:litRegenSize]
+ in = in[litRegenSize:]
+ //printf("Found %d uncompressed literals\n", litRegenSize)
+ case literalsBlockRLE:
+ if len(in) < 1 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
+ return ErrBlockTooSmall
+ }
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, litRegenSize)
+ } else {
+ if litRegenSize > maxCompressedLiteralSize {
+ // Exceptional
+ b.literalBuf = make([]byte, litRegenSize)
+ } else {
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
+
+ }
+ }
+ }
+ literals = b.literalBuf[:litRegenSize]
+ v := in[0]
+ for i := range literals {
+ literals[i] = v
+ }
+ in = in[1:]
+ if debug {
+ printf("Found %d RLE compressed literals\n", litRegenSize)
+ }
+ case literalsBlockTreeless:
+ if len(in) < litCompSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+ return ErrBlockTooSmall
+ }
+ // Store compressed literals, so we defer decoding until we get history.
+ literals = in[:litCompSize]
+ in = in[litCompSize:]
+ if debug {
+ printf("Found %d compressed literals\n", litCompSize)
+ }
+ case literalsBlockCompressed:
+ if len(in) < litCompSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+ return ErrBlockTooSmall
+ }
+ literals = in[:litCompSize]
+ in = in[litCompSize:]
+
+ huff = huffDecoderPool.Get().(*huff0.Scratch)
+ var err error
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ }
+ }
+ if huff == nil {
+ huff = &huff0.Scratch{}
+ }
+ huff.Out = b.literalBuf[:0]
+ huff, literals, err = huff0.ReadTable(literals, huff)
+ if err != nil {
+ println("reading huffman table:", err)
+ return err
+ }
+ // Use our out buffer.
+ huff.Out = b.literalBuf[:0]
+ if fourStreams {
+ literals, err = huff.Decompress4X(literals, litRegenSize)
+ } else {
+ literals, err = huff.Decompress1X(literals)
+ }
+ if err != nil {
+ println("decoding compressed literals:", err)
+ return err
+ }
+ // Make sure we don't leak our literals buffer
+ huff.Out = nil
+ if len(literals) != litRegenSize {
+ return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+ if debug {
+ printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
+ }
+ }
+
+ // Decode Sequences
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
+ if len(in) < 1 {
+ return ErrBlockTooSmall
+ }
+ seqHeader := in[0]
+ nSeqs := 0
+ switch {
+ case seqHeader == 0:
+ in = in[1:]
+ case seqHeader < 128:
+ nSeqs = int(seqHeader)
+ in = in[1:]
+ case seqHeader < 255:
+ if len(in) < 2 {
+ return ErrBlockTooSmall
+ }
+ nSeqs = int(seqHeader-128)<<8 | int(in[1])
+ in = in[2:]
+ case seqHeader == 255:
+ if len(in) < 3 {
+ return ErrBlockTooSmall
+ }
+ nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
+ in = in[3:]
+ }
+ // Allocate sequences
+ if cap(b.sequenceBuf) < nSeqs {
+ if b.lowMem {
+ b.sequenceBuf = make([]seq, nSeqs)
+ } else {
+ // Allocate max
+ b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+ }
+ } else {
+ // Reuse buffer
+ b.sequenceBuf = b.sequenceBuf[:nSeqs]
+ }
+ var seqs = &sequenceDecs{}
+ if nSeqs > 0 {
+ if len(in) < 1 {
+ return ErrBlockTooSmall
+ }
+ br := byteReader{b: in, off: 0}
+ compMode := br.Uint8()
+ br.advance(1)
+ if debug {
+ printf("Compression modes: 0b%b", compMode)
+ }
+ for i := uint(0); i < 3; i++ {
+ mode := seqCompMode((compMode >> (6 - i*2)) & 3)
+ if debug {
+ println("Table", tableIndex(i), "is", mode)
+ }
+ var seq *sequenceDec
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ seq = &seqs.litLengths
+ case tableOffsets:
+ seq = &seqs.offsets
+ case tableMatchLengths:
+ seq = &seqs.matchLengths
+ default:
+ panic("unknown table")
+ }
+ switch mode {
+ case compModePredefined:
+ seq.fse = &fsePredef[i]
+ case compModeRLE:
+ if br.remain() < 1 {
+ return ErrBlockTooSmall
+ }
+ v := br.Uint8()
+ br.advance(1)
+ dec := fseDecoderPool.Get().(*fseDecoder)
+ symb, err := decSymbolValue(v, symbolTableX[i])
+ if err != nil {
+ printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
+ return err
+ }
+ dec.setRLE(symb)
+ seq.fse = dec
+ if debug {
+ printf("RLE set to %+v, code: %v", symb, v)
+ }
+ case compModeFSE:
+ println("Reading table for", tableIndex(i))
+ dec := fseDecoderPool.Get().(*fseDecoder)
+ err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+ if err != nil {
+ println("Read table error:", err)
+ return err
+ }
+ err = dec.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debug {
+ println("Read table ok", "symbolLen:", dec.symbolLen)
+ }
+ seq.fse = dec
+ case compModeRepeat:
+ seq.repeat = true
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ }
+ in = br.unread()
+ }
+
+ // Wait for history.
+ // All time spent after this is critical since it is strictly sequential.
+ if hist == nil {
+ hist = <-b.history
+ if hist.error {
+ return ErrDecoderClosed
+ }
+ }
+
+ // Decode treeless literal block.
+ if litType == literalsBlockTreeless {
+ // TODO: We could send the history early WITHOUT the stream history.
+ // This would allow decoding treeless literals before the byte history is available.
+ // Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+ // So not much obvious gain here.
+
+ if hist.huffTree == nil {
+ return errors.New("literal block was treeless, but no history was defined")
+ }
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ }
+ }
+ var err error
+ // Use our out buffer.
+ huff = hist.huffTree
+ huff.Out = b.literalBuf[:0]
+ if fourStreams {
+ literals, err = huff.Decompress4X(literals, litRegenSize)
+ } else {
+ literals, err = huff.Decompress1X(literals)
+ }
+ // Make sure we don't leak our literals buffer
+ huff.Out = nil
+ if err != nil {
+ println("decompressing literals:", err)
+ return err
+ }
+ if len(literals) != litRegenSize {
+ return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+ } else {
+ if hist.huffTree != nil && huff != nil {
+ huffDecoderPool.Put(hist.huffTree)
+ hist.huffTree = nil
+ }
+ }
+ if huff != nil {
+ huff.Out = nil
+ hist.huffTree = huff
+ }
+ if debug {
+ println("Final literals:", len(literals), "and", nSeqs, "sequences.")
+ }
+
+ if nSeqs == 0 {
+ // Decompressed content is defined entirely as Literals Section content.
+ b.dst = append(b.dst, literals...)
+ if delayedHistory {
+ hist.append(literals)
+ }
+ return nil
+ }
+
+ seqs, err := seqs.mergeHistory(&hist.decoders)
+ if err != nil {
+ return err
+ }
+ if debug {
+ println("History merged ok")
+ }
+ br := &bitReader{}
+ if err := br.init(in); err != nil {
+ return err
+ }
+
+ // TODO: Investigate if sending history without decoders is faster.
+ // This would allow the sequences to be decoded async and only have to construct stream history.
+ // If only recent offsets were not transferred, this would be an obvious win.
+ // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+ if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+ println("initializing sequences:", err)
+ return err
+ }
+
+ err = seqs.decode(nSeqs, br, hist.b)
+ if err != nil {
+ return err
+ }
+ if !br.finished() {
+ return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+ }
+
+ err = br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ if len(b.data) > maxCompressedBlockSize {
+ return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+ }
+ // Set output and release references.
+ b.dst = seqs.out
+ seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+ if !delayedHistory {
+ // If we don't have delayed history, no need to update.
+ hist.recentOffsets = seqs.prevOffset
+ return nil
+ }
+ if b.Last {
+ // if last block we don't care about history.
+ println("Last block, no history returned")
+ hist.b = hist.b[:0]
+ return nil
+ }
+ hist.append(b.dst)
+ hist.recentOffsets = seqs.prevOffset
+ if debug {
+ println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
new file mode 100644
index 000000000..cba24c76d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -0,0 +1,754 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type blockEnc struct {
+ size int
+ literals []byte
+ sequences []seq
+ coders seqCoders
+ litEnc *huff0.Scratch
+ wr bitWriter
+
+ extraLits int
+ last bool
+
+ output []byte
+ recentOffsets [3]uint32
+ prevRecentOffsets [3]uint32
+}
+
+// init should be used once the block has been created.
+// If called more than once, the effect is the same as calling reset.
+func (b *blockEnc) init() {
+ if cap(b.literals) < maxCompressedLiteralSize {
+ b.literals = make([]byte, 0, maxCompressedLiteralSize)
+ }
+ const defSeqs = 200
+ b.literals = b.literals[:0]
+ if cap(b.sequences) < defSeqs {
+ b.sequences = make([]seq, 0, defSeqs)
+ }
+ if cap(b.output) < maxCompressedBlockSize {
+ b.output = make([]byte, 0, maxCompressedBlockSize)
+ }
+ if b.coders.mlEnc == nil {
+ b.coders.mlEnc = &fseEncoder{}
+ b.coders.mlPrev = &fseEncoder{}
+ b.coders.ofEnc = &fseEncoder{}
+ b.coders.ofPrev = &fseEncoder{}
+ b.coders.llEnc = &fseEncoder{}
+ b.coders.llPrev = &fseEncoder{}
+ }
+ b.litEnc = &huff0.Scratch{}
+ b.reset(nil)
+}
+
+// initNewEncode can be used to reset offsets and encoders to the initial state.
+func (b *blockEnc) initNewEncode() {
+ b.recentOffsets = [3]uint32{1, 4, 8}
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ b.coders.setPrev(nil, nil, nil)
+}
+
+// reset will reset the block for a new encode, but in the same stream,
+// meaning that state will be carried over, but the block content is reset.
+// If a previous block is provided, the recent offsets are carried over.
+func (b *blockEnc) reset(prev *blockEnc) {
+ b.extraLits = 0
+ b.literals = b.literals[:0]
+ b.size = 0
+ b.sequences = b.sequences[:0]
+ b.output = b.output[:0]
+ b.last = false
+ if prev != nil {
+ b.recentOffsets = prev.prevRecentOffsets
+ }
+}
+
+// swapEncoders will swap the FSE coders and the literals encoder with
+// those of the previous block, so their state can be carried over.
+func (b *blockEnc) swapEncoders(prev *blockEnc) {
+ b.coders.swap(&prev.coders)
+ b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
+}
+
+// blockHeader contains the information for a block header.
+type blockHeader uint32
+
+// setLast sets the 'last' indicator on a block.
+func (h *blockHeader) setLast(b bool) {
+ if b {
+ *h = *h | 1
+ } else {
+ const mask = (1 << 24) - 2
+ *h = *h & mask
+ }
+}
+
+// setSize will store the compressed size of a block.
+func (h *blockHeader) setSize(v uint32) {
+ const mask = 7
+ *h = (*h)&mask | blockHeader(v<<3)
+}
+
+// setType sets the block type.
+func (h *blockHeader) setType(t blockType) {
+ const mask = 1 | (((1 << 24) - 1) ^ 7)
+ *h = (*h & mask) | blockHeader(t<<1)
+}
+
+// appendTo will append the block header to a slice.
+func (h blockHeader) appendTo(b []byte) []byte {
+ return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+}
+
+// String returns a string representation of the block.
+func (h blockHeader) String() string {
+ return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
+}
+
+// literalsHeader contains literals header information.
+type literalsHeader uint64
+
+// setType can be used to set the type of literal block.
+func (h *literalsHeader) setType(t literalsBlockType) {
+ const mask = math.MaxUint64 - 3
+ *h = (*h & mask) | literalsHeader(t)
+}
+
+// setSize can be used to set a single size, for uncompressed and RLE content.
+func (h *literalsHeader) setSize(regenLen int) {
+ inBits := bits.Len32(uint32(regenLen))
+ // Only retain 2 bits
+ const mask = 3
+ lh := uint64(*h & mask)
+ switch {
+ case inBits < 5:
+ lh |= (uint64(regenLen) << 3) | (1 << 60)
+ if debug {
+ got := int(lh>>3) & 0xff
+ if got != regenLen {
+ panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
+ }
+ }
+ case inBits < 12:
+ lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
+ case inBits < 20:
+ lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
+ default:
+ panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
+ }
+ *h = literalsHeader(lh)
+}
+
+// setSizes will set the size of a compressed literals section and the input length.
+func (h *literalsHeader) setSizes(compLen, inLen int) {
+ compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
+ // Only retain 2 bits
+ const mask = 3
+ lh := uint64(*h & mask)
+ switch {
+ case compBits <= 10 && inBits <= 10:
+ lh |= (1 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+ if debug {
+ const mmask = (1 << 24) - 1
+ n := (lh >> 4) & mmask
+ if int(n&1023) != inLen {
+ panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
+ }
+ if int(n>>10) != compLen {
+ panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
+ }
+ }
+ case compBits <= 14 && inBits <= 14:
+ lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+ case compBits <= 18 && inBits <= 18:
+ lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+ default:
+ panic("internal error: block too big")
+ }
+ *h = literalsHeader(lh)
+}
+
+// appendTo will append the literals header to a byte slice.
+func (h literalsHeader) appendTo(b []byte) []byte {
+ size := uint8(h >> 60)
+ switch size {
+ case 1:
+ b = append(b, uint8(h))
+ case 2:
+ b = append(b, uint8(h), uint8(h>>8))
+ case 3:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+ case 4:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+ case 5:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+ default:
+ panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+ }
+ return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+ return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+ return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+ b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+ b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+ // Check if offset is one of the recent offsets.
+ // Adjusts the output offset accordingly.
+ // Gives a tiny bit of compression, typically around 1%.
+ if true {
+ if lits > 0 {
+ switch offset {
+ case b.recentOffsets[0]:
+ offset = 1
+ case b.recentOffsets[1]:
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 2
+ case b.recentOffsets[2]:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 3
+ default:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset += 3
+ }
+ } else {
+ switch offset {
+ case b.recentOffsets[1]:
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 1
+ case b.recentOffsets[2]:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 2
+ case b.recentOffsets[0] - 1:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 3
+ default:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset += 3
+ }
+ }
+ } else {
+ offset += 3
+ }
+ return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(a)))
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output[:0])
+ b.output = append(b.output, a...)
+ if debug {
+ println("Adding RAW block, length", len(a))
+ }
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits() error {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(b.literals)))
+
+ // Don't compress extremely small blocks
+ if len(b.literals) < 32 {
+ if debug {
+ println("Adding RAW block, length", len(b.literals))
+ }
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, b.literals...)
+ return nil
+ }
+
+ // TODO: Switch to 1X when less than x bytes.
+ out, reUsed, err := huff0.Compress4X(b.literals, b.litEnc)
+ // Bail out if the compression gain is too little.
+ if len(out) > (len(b.literals) - len(b.literals)>>4) {
+ err = huff0.ErrIncompressible
+ }
+ switch err {
+ case huff0.ErrIncompressible:
+ if debug {
+ println("Adding RAW block, length", len(b.literals))
+ }
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, b.literals...)
+ return nil
+ case huff0.ErrUseRLE:
+ if debug {
+ println("Adding RLE block, length", len(b.literals))
+ }
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, b.literals[0])
+ return nil
+ default:
+ return err
+ case nil:
+ }
+ // Compressed...
+ // Now, allow reuse
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ bh.setType(blockTypeCompressed)
+ var lh literalsHeader
+ if reUsed {
+ if debug {
+ println("Reused tree, compressed to", len(out))
+ }
+ lh.setType(literalsBlockTreeless)
+ } else {
+ if debug {
+ println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
+ }
+ lh.setType(literalsBlockCompressed)
+ }
+ // Set sizes
+ lh.setSizes(len(out), len(b.literals))
+ bh.setSize(uint32(len(out) + lh.size() + 1))
+
+ // Write block headers.
+ b.output = bh.appendTo(b.output)
+ b.output = lh.appendTo(b.output)
+ // Add compressed data.
+ b.output = append(b.output, out...)
+ // No sequences.
+ b.output = append(b.output, 0)
+ return nil
+}
+
+// encode will encode the block and put the output in b.output.
+func (b *blockEnc) encode() error {
+ if len(b.sequences) == 0 {
+ return b.encodeLits()
+ }
+ // We want some compressible difference; bail if literals make up nearly the whole block.
+ if len(b.literals) > (b.size - (b.size >> 5)) {
+ return errIncompressible
+ }
+
+ var bh blockHeader
+ var lh literalsHeader
+ bh.setLast(b.last)
+ bh.setType(blockTypeCompressed)
+ b.output = bh.appendTo(b.output)
+
+ var (
+ out []byte
+ reUsed bool
+ err error
+ )
+ if len(b.literals) > 32 {
+ // TODO: Switch to 1X on small blocks.
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+ if len(out) > len(b.literals)-len(b.literals)>>4 {
+ err = huff0.ErrIncompressible
+ }
+ } else {
+ err = huff0.ErrIncompressible
+ }
+ switch err {
+ case huff0.ErrIncompressible:
+ lh.setType(literalsBlockRaw)
+ lh.setSize(len(b.literals))
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, b.literals...)
+ if debug {
+ println("Adding literals RAW, length", len(b.literals))
+ }
+ case huff0.ErrUseRLE:
+ lh.setType(literalsBlockRLE)
+ lh.setSize(len(b.literals))
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, b.literals[0])
+ if debug {
+ println("Adding literals RLE")
+ }
+ default:
+ if debug {
+ println("Adding literals ERROR:", err)
+ }
+ return err
+ case nil:
+ // Compressed literals...
+ if reUsed {
+ if debug {
+ println("reused tree")
+ }
+ lh.setType(literalsBlockTreeless)
+ } else {
+ if debug {
+ println("new tree, size:", len(b.litEnc.OutTable))
+ }
+ lh.setType(literalsBlockCompressed)
+ if debug {
+ _, _, err := huff0.ReadTable(out, nil)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+ lh.setSizes(len(out), len(b.literals))
+ if debug {
+ printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
+ println("Adding literal header:", lh)
+ }
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, out...)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ if debug {
+ println("Adding literals compressed")
+ }
+ }
+ // Sequence compression
+
+ // Write the number of sequences
+ switch {
+ case len(b.sequences) < 128:
+ b.output = append(b.output, uint8(len(b.sequences)))
+ case len(b.sequences) < 0x7f00: // TODO: this could be wrong
+ n := len(b.sequences)
+ b.output = append(b.output, 128+uint8(n>>8), uint8(n))
+ default:
+ n := len(b.sequences) - 0x7f00
+ b.output = append(b.output, 255, uint8(n), uint8(n>>8))
+ }
+ if debug {
+ println("Encoding", len(b.sequences), "sequences")
+ }
+ b.genCodes()
+ llEnc := b.coders.llEnc
+ ofEnc := b.coders.ofEnc
+ mlEnc := b.coders.mlEnc
+ err = llEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+ err = ofEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+ err = mlEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+
+ // Choose the best compression mode for each type.
+ // Will evaluate the new vs predefined and previous.
+ chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
+ // See if predefined/previous is better
+ hist := cur.count[:cur.symbolLen]
+ nSize := cur.approxSize(hist) + cur.maxHeaderSize()
+ predefSize := preDef.approxSize(hist)
+ prevSize := prev.approxSize(hist)
+
+ // Add a small penalty for new encoders.
+ // Don't bother with extremely small gains (<2 bytes).
+ nSize = nSize + (nSize+2*8*16)>>4
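+ // (i.e. nSize grows by nSize/16 plus 16 bits, a 2 byte penalty)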
+ switch {
+ case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
+ if debug {
+ println("Using predefined", predefSize>>3, "<=", nSize>>3)
+ }
+ return preDef, compModePredefined
+ case prevSize <= nSize:
+ if debug {
+ println("Using previous", prevSize>>3, "<=", nSize>>3)
+ }
+ return prev, compModeRepeat
+ default:
+ if debug {
+ println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
+ println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
+ }
+ return cur, compModeFSE
+ }
+ }
+
+ // Write compression mode
+ var mode uint8
+ if llEnc.useRLE {
+ mode |= uint8(compModeRLE) << 6
+ llEnc.setRLE(b.sequences[0].llCode)
+ if debug {
+ println("llEnc.useRLE")
+ }
+ } else {
+ var m seqCompMode
+ llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
+ mode |= uint8(m) << 6
+ }
+ if ofEnc.useRLE {
+ mode |= uint8(compModeRLE) << 4
+ ofEnc.setRLE(b.sequences[0].ofCode)
+ if debug {
+ println("ofEnc.useRLE")
+ }
+ } else {
+ var m seqCompMode
+ ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
+ mode |= uint8(m) << 4
+ }
+
+ if mlEnc.useRLE {
+ mode |= uint8(compModeRLE) << 2
+ mlEnc.setRLE(b.sequences[0].mlCode)
+ if debug {
+ println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
+ }
+ } else {
+ var m seqCompMode
+ mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
+ mode |= uint8(m) << 2
+ }
+ b.output = append(b.output, mode)
+ if debug {
+ printf("Compression modes: 0b%b", mode)
+ }
+ b.output, err = llEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+ start := len(b.output)
+ b.output, err = ofEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+ if false {
+ println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
+ for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
+ }
+ }
+ b.output, err = mlEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+
+ // Maybe in block?
+ wr := &b.wr
+ wr.reset(b.output)
+
+ var ll, of, ml cState
+
+ // Current sequence
+ seq := len(b.sequences) - 1
+ s := b.sequences[seq]
+ llEnc.setBits(llBitsTable[:])
+ mlEnc.setBits(mlBitsTable[:])
+ ofEnc.setBits(nil)
+
+ llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]
+
+ // We have 3 bounds checks here (and in the loop).
+ // Since we are iterating backwards, they are hard to avoid.
+ llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+ ll.init(wr, &llEnc.ct, llB)
+ of.init(wr, &ofEnc.ct, ofB)
+ wr.flush32()
+ ml.init(wr, &mlEnc.ct, mlB)
+
+ // Each of these lookups also generates a bounds check.
+ wr.addBits32NC(s.litLen, llB.outBits)
+ wr.addBits32NC(s.matchLen, mlB.outBits)
+ wr.flush32()
+ wr.addBits32NC(s.offset, ofB.outBits)
+ if debugSequences {
+ println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+ }
+ seq--
+ if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+ // No need to flush (common)
+ for seq >= 0 {
+ s = b.sequences[seq]
+ wr.flush32()
+ llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+ // tablelog max is 8 for all.
+ of.encode(ofB)
+ ml.encode(mlB)
+ ll.encode(llB)
+ wr.flush32()
+
+ // We checked that all can stay within 32 bits
+ wr.addBits32NC(s.litLen, llB.outBits)
+ wr.addBits32NC(s.matchLen, mlB.outBits)
+ wr.addBits32NC(s.offset, ofB.outBits)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ } else {
+ for seq >= 0 {
+ s = b.sequences[seq]
+ wr.flush32()
+ llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+ // tablelog max is below 8 for each.
+ of.encode(ofB)
+ ml.encode(mlB)
+ ll.encode(llB)
+ wr.flush32()
+
+ // ml+ll = max 32 bits total
+ wr.addBits32NC(s.litLen, llB.outBits)
+ wr.addBits32NC(s.matchLen, mlB.outBits)
+ wr.flush32()
+ wr.addBits32NC(s.offset, ofB.outBits)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ err = wr.close()
+ if err != nil {
+ return err
+ }
+ b.output = wr.out
+
+ if len(b.output)-3 >= b.size {
+ // Maybe even add a bigger margin.
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return errIncompressible
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)) - 3)
+ if debug {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[:0])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()[:256]
+ ofH := b.coders.ofEnc.Histogram()[:256]
+ mlH := b.coders.mlEnc.Histogram()[:256]
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i, seq := range b.sequences {
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debug && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ b.sequences[i] = seq
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 000000000..01a01e486
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[blockTypeRaw-0]
+ _ = x[blockTypeRLE-1]
+ _ = x[blockTypeCompressed-2]
+ _ = x[blockTypeReserved-3]
+}
+
+const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
+
+var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
+
+func (i blockType) String() string {
+ if i >= blockType(len(_blockType_index)-1) {
+ return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[literalsBlockRaw-0]
+ _ = x[literalsBlockRLE-1]
+ _ = x[literalsBlockCompressed-2]
+ _ = x[literalsBlockTreeless-3]
+}
+
+const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
+
+var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
+
+func (i literalsBlockType) String() string {
+ if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
+ return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[compModePredefined-0]
+ _ = x[compModeRLE-1]
+ _ = x[compModeFSE-2]
+ _ = x[compModeRepeat-3]
+}
+
+const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
+
+var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
+
+func (i seqCompMode) String() string {
+ if i >= seqCompMode(len(_seqCompMode_index)-1) {
+ return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[tableLiteralLengths-0]
+ _ = x[tableOffsets-1]
+ _ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+ if i >= tableIndex(len(_tableIndex_index)-1) {
+ return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 000000000..4a8460476
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,121 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+type byteBuffer interface {
+ // Read up to 8 bytes.
+ // Returns nil if not enough input is available.
+ readSmall(n int) []byte
+
+ // Read >8 bytes.
+ // MAY use the destination slice.
+ readBig(n int, dst []byte) ([]byte, error)
+
+ // Read a single byte.
+ readByte() (byte, error)
+
+ // Skip n bytes.
+ skipN(n int) error
+}
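+
+// Two implementations follow: byteBuf reads from an in-memory []byte, and
+// readerWrapper pulls from a wrapped io.Reader.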
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) []byte {
+ if debug && n > 8 {
+ panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+ }
+ bb := *b
+ if len(bb) < n {
+ return nil
+ }
+ r := bb[:n]
+ *b = bb[n:]
+ return r
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+ bb := *b
+ if len(bb) < n {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := bb[:n]
+ *b = bb[n:]
+ return r, nil
+}
+
+func (b *byteBuf) remain() []byte {
+ return *b
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+ bb := *b
+ if len(bb) < 1 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ r := bb[0]
+ *b = bb[1:]
+ return r, nil
+}
+
+func (b *byteBuf) skipN(n int) error {
+ bb := *b
+ if len(bb) < n {
+ return io.ErrUnexpectedEOF
+ }
+ *b = bb[n:]
+ return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+ r io.Reader
+ tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) []byte {
+ if debug && n > 8 {
+ panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+ }
+ n2, err := io.ReadFull(r.r, r.tmp[:n])
+ // We only really care about the actual bytes read.
+ if n2 != n {
+ if debug {
+ println("readSmall: got", n2, "want", n, "err", err)
+ }
+ return nil
+ }
+ return r.tmp[:n]
+}
+
+func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
+ if cap(dst) < n {
+ dst = make([]byte, n)
+ }
+ n2, err := io.ReadFull(r.r, dst[:n])
+ return dst[:n2], err
+}
+
+func (r *readerWrapper) readByte() (byte, error) {
+ n2, err := r.r.Read(r.tmp[:1])
+ if err != nil {
+ return 0, err
+ }
+ if n2 != 1 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ return r.tmp[0], nil
+}
+
+func (r *readerWrapper) skipN(n int) error {
+ _, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+ return err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
new file mode 100644
index 000000000..dc4378b64
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -0,0 +1,74 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+ b []byte
+ off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+ b.b = in
+ b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+ b.off += int(n)
+}
+
+// overread returns whether we have advanced too far.
+func (b *byteReader) overread() bool {
+ return b.off > len(b.b)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+ b2 := b.b[b.off : b.off+4 : b.off+4]
+ v3 := int32(b2[3])
+ v2 := int32(b2[2])
+ v1 := int32(b2[1])
+ v0 := int32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint8 returns the next byte
+func (b *byteReader) Uint8() uint8 {
+ v := b.b[b.off]
+ return v
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+ if r := b.remain(); r < 4 {
+ // Very rare
+ v := uint32(0)
+ for i := 1; i <= r; i++ {
+ v = (v << 8) | uint32(b.b[len(b.b)-i])
+ }
+ return v
+ }
+ b2 := b.b[b.off : b.off+4 : b.off+4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+ return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+ return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 000000000..f06bff6f6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,437 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use Reset(r io.Reader) to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+ o decoderOptions
+
+ // Unreferenced block decoders, ready for use.
+ decoders chan *blockDec
+
+ // Unreferenced frame decoders, ready for use.
+ frames chan *frameDec
+
+ // Streams ready to be decoded.
+ stream chan decodeStream
+
+ // Current read position used for Reader functionality.
+ current decoderState
+
+ // Custom dictionaries
+ dicts map[uint32]struct{}
+
+ // streamWg is the waitgroup for all streams
+ streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+ // current block being written to stream.
+ decodeOutput
+
+ // output in order to be written to stream.
+ output chan decodeOutput
+
+ // cancel remaining output.
+ cancel chan struct{}
+
+ flushed bool
+}
+
+var (
+ // Check the interfaces we want to support.
+ _ = io.WriterTo(&Decoder{})
+ _ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll or DecodeBuffer.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+ var d Decoder
+ d.o.setDefault()
+ for _, o := range opts {
+ err := o(&d.o)
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ d.current.flushed = true
+
+ // Create decoders
+ d.decoders = make(chan *blockDec, d.o.concurrent)
+ d.frames = make(chan *frameDec, d.o.concurrent)
+ for i := 0; i < d.o.concurrent; i++ {
+ d.frames <- newFrameDec(d.o)
+ d.decoders <- newBlockDec(d.o.lowMem)
+ }
+
+ if r == nil {
+ return &d, nil
+ }
+ return &d, d.Reset(r)
+}
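+
+// decompressStream is an illustrative sketch (not part of the package API)
+// showing typical streaming use of a Decoder.
+func decompressStream(in io.Reader, out io.Writer) (int64, error) {
+ dec, err := NewReader(in)
+ if err != nil {
+ return 0, err
+ }
+ // Close releases the decoder resources. To decode another stream with
+ // the same Decoder, call Reset instead of creating a new one.
+ defer dec.Close()
+ return io.Copy(out, dec)
+}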
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes read and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+ if d.stream == nil {
+ return 0, errors.New("no input has been initialized")
+ }
+ var n int
+ for {
+ if len(d.current.b) > 0 {
+ filled := copy(p, d.current.b)
+ p = p[filled:]
+ d.current.b = d.current.b[filled:]
+ n += filled
+ }
+ if len(p) == 0 {
+ break
+ }
+ if len(d.current.b) == 0 {
+ // We have an error and no more data
+ if d.current.err != nil {
+ break
+ }
+ d.nextBlock()
+ }
+ }
+ if len(d.current.b) > 0 {
+ // Only return error at end of block
+ return n, nil
+ }
+ if d.current.err != nil {
+ d.drainOutput()
+ }
+ if debug {
+ println("returning", n, d.current.err, len(d.decoders))
+ }
+ return n, d.current.err
+}
+
+// Reset will reset the decoder with the supplied stream after the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+func (d *Decoder) Reset(r io.Reader) error {
+ if d.current.err == ErrDecoderClosed {
+ return d.current.err
+ }
+ if r == nil {
+ return errors.New("nil Reader sent as input")
+ }
+
+ if d.stream == nil {
+ d.stream = make(chan decodeStream, 1)
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(d.stream)
+ }
+
+ d.drainOutput()
+
+ // If the input is a bytes.Buffer and < 1MB, do synchronous decoding anyway.
+ if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+ b := bb.Bytes()
+ dst, err := d.DecodeAll(b, nil)
+ if err == nil {
+ err = io.EOF
+ }
+ d.current.b = dst
+ d.current.err = err
+ d.current.flushed = true
+ return nil
+ }
+
+ // Remove current block.
+ d.current.decodeOutput = decodeOutput{}
+ d.current.err = nil
+ d.current.cancel = make(chan struct{})
+ d.current.flushed = false
+ d.current.d = nil
+
+ d.stream <- decodeStream{
+ r: r,
+ output: d.current.output,
+ cancel: d.current.cancel,
+ }
+ return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
+func (d *Decoder) drainOutput() {
+ if d.current.cancel != nil {
+ println("cancelling current")
+ close(d.current.cancel)
+ d.current.cancel = nil
+ }
+ if d.current.d != nil {
+ println("re-adding current decoder", d.current.d, len(d.decoders))
+ d.decoders <- d.current.d
+ d.current.d = nil
+ d.current.b = nil
+ }
+ if d.current.output == nil || d.current.flushed {
+ println("current already flushed")
+ return
+ }
+ for {
+ select {
+ case v := <-d.current.output:
+ if v.d != nil {
+ println("got decoder", v.d)
+ d.decoders <- v.d
+ }
+ if v.err == errEndOfStream {
+ println("current flushed")
+ d.current.flushed = true
+ return
+ }
+ }
+ }
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+ if d.stream == nil {
+ return 0, errors.New("no input has been initialized")
+ }
+ var n int64
+ for {
+ if len(d.current.b) > 0 {
+ n2, err2 := w.Write(d.current.b)
+ n += int64(n2)
+ if err2 != nil && d.current.err == nil {
+ d.current.err = err2
+ break
+ }
+ }
+ if d.current.err != nil {
+ break
+ }
+ d.nextBlock()
+ }
+ err := d.current.err
+ if err != nil {
+ d.drainOutput()
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+ if d.current.err == ErrDecoderClosed {
+ return dst, ErrDecoderClosed
+ }
+ //println(len(d.frames), len(d.decoders), d.current)
+ block, frame := <-d.decoders, <-d.frames
+ defer func() {
+ d.decoders <- block
+ frame.rawInput = nil
+ d.frames <- frame
+ }()
+ if cap(dst) == 0 {
+ // Allocate 1MB by default.
+ dst = make([]byte, 0, 1<<20)
+ }
+ br := byteBuf(input)
+ for {
+ err := frame.reset(&br)
+ if err == io.EOF {
+ return dst, nil
+ }
+ if err != nil {
+ return dst, err
+ }
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ return dst, ErrDecoderSizeExceeded
+ }
+ if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+ // Never preallocate more than 1 GB up front.
+ if uint64(cap(dst)) < frame.FrameContentSize {
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
+ dst, err = frame.runDecoder(dst, block)
+ if err != nil {
+ return dst, err
+ }
+ if len(br) == 0 {
+ break
+ }
+ }
+ return dst, nil
+}
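+
+// decodeInMemory is an illustrative sketch (not part of the package API)
+// showing stateless decompression with a shared Decoder. DecodeAll appends
+// to dst, so a pre-sized destination (the capacity guess here is hypothetical)
+// avoids reallocations.
+func decodeInMemory(dec *Decoder, compressed []byte) ([]byte, error) {
+ return dec.DecodeAll(compressed, make([]byte, 0, len(compressed)*3))
+}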
+
+// nextBlock reads the next block into d.current.
+// If an error occurs, d.current.err will be set.
+func (d *Decoder) nextBlock() {
+ if d.current.d != nil {
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+ if d.current.err != nil {
+ // Keep error state.
+ return
+ }
+ d.current.decodeOutput = <-d.current.output
+ if debug {
+ println("got", len(d.current.b), "bytes, error:", d.current.err)
+ }
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+ if d.current.err == ErrDecoderClosed {
+ return
+ }
+ d.drainOutput()
+ if d.stream != nil {
+ close(d.stream)
+ d.streamWg.Wait()
+ d.stream = nil
+ }
+ if d.decoders != nil {
+ close(d.decoders)
+ for dec := range d.decoders {
+ dec.Close()
+ }
+ d.decoders = nil
+ }
+ if d.current.d != nil {
+ d.current.d.Close()
+ d.current.d = nil
+ }
+ d.current.err = ErrDecoderClosed
+}
+
+type decodeOutput struct {
+ d *blockDec
+ b []byte
+ err error
+}
+
+type decodeStream struct {
+ r io.Reader
+
+ // Blocks ready to be written to output.
+ output chan decodeOutput
+
+ // cancel reading from the input
+ cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// startStreamDecoder processes a stream as follows:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create a goroutine that handles stream processing; it sends history to the decoders as they become available.
+// The decoders update the history as they decode.
+// When a block is returned:
+// a) the history is sent to the next decoder,
+// b) the content is written to the CRC,
+// c) the data is returned to the writer,
+// d) we wait for the next block to return data.
+// Once written, the decoders are handed back to the frame decoder for re-use.
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+ defer d.streamWg.Done()
+ frame := newFrameDec(d.o)
+ for stream := range inStream {
+ br := readerWrapper{r: stream.r}
+ decodeStream:
+ for {
+ err := frame.reset(&br)
+ if debug && err != nil {
+ println("Frame decoder returned", err)
+ }
+ if err != nil {
+ stream.output <- decodeOutput{
+ err: err,
+ }
+ break
+ }
+ if debug {
+ println("starting frame decoder")
+ }
+
+ // This goroutine will forward history between frames.
+ frame.frameDone.Add(1)
+ frame.initAsync()
+
+ go frame.startDecoder(stream.output)
+ decodeFrame:
+ // Go through all blocks of the frame.
+ for {
+ dec := <-d.decoders
+ select {
+ case <-stream.cancel:
+ if !frame.sendErr(dec, io.EOF) {
+ // To not let the decoder dangle, send it back.
+ stream.output <- decodeOutput{d: dec}
+ }
+ break decodeStream
+ default:
+ }
+ err := frame.next(dec)
+ switch err {
+ case io.EOF:
+ // End of current frame, no error
+ println("EOF on next block")
+ break decodeFrame
+ case nil:
+ continue
+ default:
+ println("block decoder returned", err)
+ break decodeStream
+ }
+ }
+ // All blocks have started decoding, check if there are more frames.
+ println("waiting for done")
+ frame.frameDone.Wait()
+ println("done waiting...")
+ }
+ frame.frameDone.Wait()
+ println("Sending EOS")
+ stream.output <- decodeOutput{err: errEndOfStream}
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 000000000..52c1eb066
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,66 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// options retains accumulated state of multiple options.
+type decoderOptions struct {
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+}
+
+func (o *decoderOptions) setDefault() {
+ *o = decoderOptions{
+ // use less ram: true for now, but may change.
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ }
+ o.maxDecodedSize = 1 << 63
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// but possibly have to allocate more while running.
+func WithDecoderLowmem(b bool) DOption {
+ return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithDecoderConcurrency(n int) DOption {
+ return func(o *decoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("Concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// (non-streaming) operations.
+// The maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+ return func(o *decoderOptions) error {
+ if n == 0 {
+ return errors.New("WithDecoderMaxmemory must be at least 1")
+ }
+ if n > 1<<63 {
+ return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63")
+ }
+ o.maxDecodedSize = n
+ return nil
+ }
+}
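+
+// newTunedReader is an illustrative sketch (not part of the package API)
+// showing how decoder options combine. A nil Reader is allowed; the first
+// stream can be started later with Reset.
+func newTunedReader() (*Decoder, error) {
+ return NewReader(nil, WithDecoderConcurrency(2), WithDecoderMaxMemory(1<<30))
+}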
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 000000000..02c79814f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,410 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+ dFastLongTableBits = 17 // Bits used in the long match table
+ dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+ dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+
+ dFastShortTableBits = tableBits // Bits used in the short match table
+ dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+ dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+)
+
+type doubleFastEncoder struct {
+ fastEncoder
+ longTable [dFastLongTableSize]tableEntry
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur > (1<<30)+e.maxMatchOff {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ stepSize := int32(e.o.targetLength)
+ if stepSize == 0 {
+ stepSize++
+ }
+
+ // TEMPLATE
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+ // nextHash is the hash at s
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debug && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS = nextHashS & dFastShortTableMask
+ nextHashL = nextHashL & dFastLongTableMask
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ nextHashS = hash5(cv, dFastShortTableBits)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ continue
+ }
+ const repOff2 = 1
+ // We deviate from the reference encoder and also check offset 2.
+ // Slower and not consistently better, so disabled.
+ // repIndex = s - offset2 + repOff2
+ if false && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff2*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff2, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 2
+ seq.offset = 2
+ if debugSequences {
+ println("repeat sequence 2", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ nextHashS = hash5(cv, dFastShortTableBits)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ // Swap offsets
+ offset1, offset2 = offset2, offset1
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debug {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debug {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debug && t < 0 {
+ panic("t<0")
+ }
+ if debug {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ nextHashS = hash5(cv, dFastShortTableBits)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debug && s <= t {
+ panic("s <= t")
+ }
+
+ if debug && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start + 2 and end - 2
+ index0 := s - l + 2
+ index1 := s - 2
+ if l == 4 {
+ // if l is 4, we would check the same place twice, so index s-1 instead.
+ index1++
+ }
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ entry0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ entry1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.table[hash5(cv0, dFastShortTableBits)&dFastShortTableMask] = entry0
+ e.longTable[hash8(cv0, dFastLongTableBits)&dFastLongTableMask] = entry0
+ e.table[hash5(cv1, dFastShortTableBits)&dFastShortTableMask] = entry1
+ e.longTable[hash8(cv1, dFastLongTableBits)&dFastLongTableMask] = entry1
+
+ cv = load6432(src, s)
+ nextHashS = hash5(cv, dFastShortTableBits)
+ nextHashL = hash8(cv, dFastLongTableBits)
+
+ // Check offset 2
+ if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) {
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+ // Store this, since we have it.
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL&dFastLongTableMask] = entry
+ e.table[nextHashS&dFastShortTableMask] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ nextHashS = hash5(cv, dFastShortTableBits)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 000000000..a8edaa888
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,416 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "math/bits"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ o encParams
+ // cur is the offset at the start of hist
+ cur int32
+ // maximum offset. Should be at least 2x block size.
+ maxMatchOff int32
+ hist []byte
+ crc *xxhash.Digest
+ table [tableSize]tableEntry
+ tmp [8]byte
+ blk *blockEnc
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastEncoder) CRC() *xxhash.Digest {
+ return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
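+// The value is the low 32 bits of the frame's xxhash64 digest, stored little endian.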
+func (e *fastEncoder) AppendCRC(dst []byte) []byte {
+ crc := e.crc.Sum(e.tmp[:0])
+ dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+ return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input, if size > 0.
+func (e *fastEncoder) WindowSize(size int) int32 {
+ if size > 0 && size < int(e.maxMatchOff) {
+ b := int32(1) << uint(bits.Len(uint(size)))
+ // Keep minimum window.
+ if b < 1024 {
+ b = 1024
+ }
+ return b
+ }
+ return e.maxMatchOff
+}
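+
+// For example (illustrative): with size = 1500 and a larger maxMatchOff,
+// WindowSize returns 2048 (the next power of two, with a 1024 minimum);
+// with size = 0 it returns e.maxMatchOff unchanged.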
+
+// Block returns the current block.
+func (e *fastEncoder) Block() *blockEnc {
+ return e.blk
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur > (1<<30)+e.maxMatchOff {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ stepSize := int32(e.o.targetLength)
+ if stepSize == 0 {
+ stepSize++
+ }
+ stepSize++
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+ // nextHash is the hash at s
+ nextHash := hash6(cv, hashLog)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks,
+ // which we ensure by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debug && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash2 := hash6(cv>>8, hashLog) & tableMask
+ nextHash = nextHash & tableMask
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ //nextHash = hashLen(cv, hashLog, mls)
+ nextHash = hash6(cv, hashLog)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debug && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ nextHash = hash6(cv, hashLog)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debug && s <= t {
+ panic("s <= t")
+ }
+
+ if debug && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ nextHash = hash6(cv, hashLog)
+
+ // Check offset 2
+ if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) {
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+ // Store this, since we have it.
+ e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ nextHash = hash6(cv, hashLog)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+func (e *fastEncoder) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ l := e.maxMatchOff * 2
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff*2) {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastEncoder) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
+ if debug {
+ if s < 0 {
+ panic("s<0")
+ }
+ if t < 0 {
+ panic("t<0")
+ }
+ if s-t > e.maxMatchOff {
+ panic(s - t)
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastEncoder) Reset() {
+ if e.blk == nil {
+ e.blk = &blockEnc{}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ if cap(e.hist) < int(e.maxMatchOff*2) {
+ l := e.maxMatchOff * 2
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ }
+ // We offset current position so everything will be out of reach
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go
new file mode 100644
index 000000000..b6779ecb6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_params.go
@@ -0,0 +1,154 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+type encParams struct {
+ // largest match distance : larger == more compression, more memory needed during decompression
+ windowLog uint8
+
+ // fully searched segment : larger == more compression, slower, more memory (useless for fast)
+ chainLog uint8
+
+ // dispatch table : larger == faster, more memory
+ hashLog uint8
+
+ // < nb of searches : larger == more compression, slower
+ searchLog uint8
+
+ // < match length searched : larger == faster decompression, sometimes less compression
+ minMatch uint8
+
+ // acceptable match size for optimal parser (only) : larger == more compression, slower
+ targetLength uint32
+
+ // see ZSTD_strategy definition above
+ strategy strategy
+}
+
+// strategy defines the algorithm to use when generating sequences.
+type strategy uint8
+
+const (
+ // Compression strategies, listed from fastest to strongest
+ strategyFast strategy = iota + 1
+ strategyDfast
+ strategyGreedy
+ strategyLazy
+ strategyLazy2
+ strategyBtlazy2
+ strategyBtopt
+ strategyBtultra
+ strategyBtultra2
+ // note : new strategies _might_ be added in the future.
+ // Only the order (from fast to strong) is guaranteed
+
+)
+
+var defEncParams = [4][]encParams{
+ { // "default" - for any srcSize > 256 KB
+ // W, C, H, S, L, TL, strat
+ {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels
+ {19, 13, 14, 1, 7, 0, strategyFast}, // level 1
+ {20, 15, 16, 1, 6, 0, strategyFast}, // level 2
+ {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3
+ {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4
+ {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5
+ {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6
+ {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7
+ {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8
+ {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9
+ {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10
+ {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11
+ {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12
+ {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13
+ {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14
+ {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15
+ {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16
+ {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17
+ {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18
+ {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19
+ {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20
+ {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21
+ {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22
+ },
+ { // for srcSize <= 256 KB
+ // W, C, H, S, L, T, strat
+ {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
+ {18, 13, 14, 1, 6, 0, strategyFast}, // level 1
+ {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2
+ {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3
+ {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4.
+ {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5.
+ {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6.
+ {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7
+ {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8
+ {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9
+ {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10
+ {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11.
+ {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12.
+ {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13
+ {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14.
+ {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15.
+ {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16.
+ {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17.
+ {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18.
+ {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19.
+ {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20.
+ {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21.
+ {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22.
+ },
+ { // for srcSize <= 128 KB
+ // W, C, H, S, L, T, strat
+ {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels
+ {17, 12, 13, 1, 6, 0, strategyFast}, // level 1
+ {17, 13, 15, 1, 5, 0, strategyFast}, // level 2
+ {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3
+ {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4
+ {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5
+ {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6
+ {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7
+ {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8
+ {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9
+ {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10
+ {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11
+ {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12
+ {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13.
+ {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14.
+ {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15.
+ {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16.
+ {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17.
+ {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18.
+ {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19.
+ {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20.
+ {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21.
+ {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22.
+ },
+ { // for srcSize <= 16 KB
+ // W, C, H, S, L, T, strat
+ {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
+ {14, 14, 15, 1, 5, 0, strategyFast}, // level 1
+ {14, 14, 15, 1, 4, 0, strategyFast}, // level 2
+ {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3
+ {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4
+ {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5.
+ {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6
+ {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7
+ {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8.
+ {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9.
+ {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10.
+ {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11.
+ {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12.
+ {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13.
+ {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14.
+ {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15.
+ {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16.
+ {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17.
+ {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18.
+ {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19.
+ {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20.
+ {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21.
+ {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
+ },
+}
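The tables above mirror the reference zstd parameter tables: row 0 is the base for negative levels, row N corresponds to level N, and the outer index is chosen by source size. As an illustrative sketch (this helper and its name are hypothetical, not part of the file), a lookup could work like this:

func lookupEncParams(level int, srcSize int64) encParams {
	// Pick the table by source size, matching the comments above.
	tableIdx := 0 // "default": size unknown or > 256 KB
	switch {
	case srcSize > 0 && srcSize <= 16<<10:
		tableIdx = 3
	case srcSize > 0 && srcSize <= 128<<10:
		tableIdx = 2
	case srcSize > 0 && srcSize <= 256<<10:
		tableIdx = 1
	}
	table := defEncParams[tableIdx]
	if level < 1 {
		level = 0 // base row covers negative levels
	}
	if level >= len(table) {
		level = len(table) - 1 // clamp to level 22
	}
	return table[level]
}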
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 000000000..ed028f5a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,472 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ rdebug "runtime/debug"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used either for compressing a stream via the
+// io.WriteCloser interface supported by the Encoder, or for multiple
+// independent tasks via the EncodeAll function.
+// For smaller inputs, using the EncodeAll function is encouraged.
+// Use NewWriter to create a new instance.
+type Encoder struct {
+ o encoderOptions
+ encoders chan encoder
+ state encoderState
+ init sync.Once
+}
+
+type encoder interface {
+ Encode(blk *blockEnc, src []byte)
+ Block() *blockEnc
+ CRC() *xxhash.Digest
+ AppendCRC([]byte) []byte
+ WindowSize(size int) int32
+ UseBlock(*blockEnc)
+ Reset()
+}
+
+type encoderState struct {
+ w io.Writer
+ filling []byte
+ current []byte
+ previous []byte
+ encoder encoder
+ writing *blockEnc
+ err error
+ writeErr error
+ nWritten int64
+ headerWritten bool
+ eofWritten bool
+
+ // This waitgroup indicates an encode is running.
+ wg sync.WaitGroup
+ // This waitgroup indicates we have a block encoding/writing.
+ wWg sync.WaitGroup
+}
+
+// NewWriter will create a new Zstandard encoder.
+// If the encoder will only be used for encoding blocks via EncodeAll, a nil writer can be used.
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+ var e Encoder
+ e.o.setDefault()
+ for _, o := range opts {
+ err := o(&e.o)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if w != nil {
+ e.Reset(w)
+ } else {
+ e.init.Do(func() {
+ e.initialize()
+ })
+ }
+ return &e, nil
+}
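As a usage sketch (illustrative, not part of the file): streaming compression with the Encoder. The helper name is hypothetical; Close flushes remaining data and writes the CRC if enabled.

func compressStream(dst io.Writer, src io.Reader) error {
	enc, err := NewWriter(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close() // release resources; ignore the secondary error
		return err
	}
	return enc.Close()
}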
+
+func (e *Encoder) initialize() {
+ e.encoders = make(chan encoder, e.o.concurrent)
+ for i := 0; i < e.o.concurrent; i++ {
+ e.encoders <- e.o.encoder()
+ }
+}
+
+// Reset will re-initialize the writer and new writes will encode to the supplied writer
+// as a new, independent stream.
+func (e *Encoder) Reset(w io.Writer) {
+ e.init.Do(func() {
+ e.initialize()
+ })
+ s := &e.state
+ s.wg.Wait()
+ s.wWg.Wait()
+ if cap(s.filling) == 0 {
+ s.filling = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.current) == 0 {
+ s.current = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.previous) == 0 {
+ s.previous = make([]byte, 0, e.o.blockSize)
+ }
+ if s.encoder == nil {
+ s.encoder = e.o.encoder()
+ }
+ if s.writing == nil {
+ s.writing = &blockEnc{}
+ s.writing.init()
+ }
+ s.writing.initNewEncode()
+ s.filling = s.filling[:0]
+ s.current = s.current[:0]
+ s.previous = s.previous[:0]
+ s.encoder.Reset()
+ s.headerWritten = false
+ s.eofWritten = false
+ s.w = w
+ s.err = nil
+ s.nWritten = 0
+ s.writeErr = nil
+}
+
+// Write data to the encoder.
+// Input data will be buffered and as the buffer fills up
+// content will be compressed and written to the output.
+// When done writing, use Close to flush the remaining output
+// and write CRC if requested.
+func (e *Encoder) Write(p []byte) (n int, err error) {
+ s := &e.state
+ for len(p) > 0 {
+ if len(p)+len(s.filling) < e.o.blockSize {
+ if e.o.crc {
+ _, _ = s.encoder.CRC().Write(p)
+ }
+ s.filling = append(s.filling, p...)
+ return n + len(p), nil
+ }
+ add := p
+ if len(p)+len(s.filling) > e.o.blockSize {
+ add = add[:e.o.blockSize-len(s.filling)]
+ }
+ if e.o.crc {
+ _, _ = s.encoder.CRC().Write(add)
+ }
+ s.filling = append(s.filling, add...)
+ p = p[len(add):]
+ n += len(add)
+ if len(s.filling) < e.o.blockSize {
+ return n, nil
+ }
+ err := e.nextBlock(false)
+ if err != nil {
+ return n, err
+ }
+ if debug && len(s.filling) > 0 {
+ panic(len(s.filling))
+ }
+ }
+ return n, nil
+}
+
+// nextBlock will synchronize and start compressing input in e.state.filling.
+// If an error has occurred during encoding it will be returned.
+func (e *Encoder) nextBlock(final bool) error {
+ s := &e.state
+ // Wait for current block.
+ s.wg.Wait()
+ if s.err != nil {
+ return s.err
+ }
+ if len(s.filling) > e.o.blockSize {
+ return fmt.Errorf("block > maxStoreBlockSize")
+ }
+ if !s.headerWritten {
+ var tmp [maxHeaderSize]byte
+ fh := frameHeader{
+ ContentSize: 0,
+ WindowSize: uint32(s.encoder.WindowSize(0)),
+ SingleSegment: false,
+ Checksum: e.o.crc,
+ DictID: 0,
+ }
+ dst, err := fh.appendTo(tmp[:0])
+ if err != nil {
+ return err
+ }
+ s.headerWritten = true
+ s.wWg.Wait()
+ var n2 int
+ n2, s.err = s.w.Write(dst)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ }
+ if s.eofWritten {
+ // Ensure we only write it once.
+ final = false
+ }
+
+ if len(s.filling) == 0 {
+ // Final block, but no data.
+ if final {
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ blk.last = true
+ blk.encodeRaw(nil)
+ s.wWg.Wait()
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ }
+ return s.err
+ }
+
+ // Move blocks forward.
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+ s.wg.Add(1)
+ go func(src []byte) {
+ if debug {
+ println("Adding block,", len(src), "bytes, final:", final)
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ s.err = fmt.Errorf("panic while encoding: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wg.Done()
+ }()
+ enc := s.encoder
+ blk := enc.Block()
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+ // Wait for pending writes.
+ s.wWg.Wait()
+ if s.writeErr != nil {
+ s.err = s.writeErr
+ return
+ }
+ // Transfer encoders from previous write block.
+ blk.swapEncoders(s.writing)
+ // Transfer recent offsets to next.
+ enc.UseBlock(s.writing)
+ s.writing = blk
+ s.wWg.Add(1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wWg.Done()
+ }()
+ err := blk.encode()
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(src)
+ // In fast mode, we do not transfer offsets, so we don't have to deal with changing them.
+ case nil:
+ default:
+ s.writeErr = err
+ return
+ }
+ _, s.writeErr = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ }()
+ }(s.current)
+ return nil
+}
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+ if debug {
+ println("Using ReadFrom")
+ }
+ // Maybe handle stuff queued?
+ e.state.filling = e.state.filling[:e.o.blockSize]
+ src := e.state.filling
+ for {
+ n2, err := r.Read(src)
+ _, _ = e.state.encoder.CRC().Write(src[:n2])
+ // src is now the unfilled part...
+ src = src[n2:]
+ n += int64(n2)
+ switch err {
+ case io.EOF:
+ e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
+ if debug {
+ println("ReadFrom: got EOF final block:", len(e.state.filling))
+ }
+ return n, e.nextBlock(true)
+ default:
+ if debug {
+ println("ReadFrom: got error:", err)
+ }
+ e.state.err = err
+ return n, err
+ case nil:
+ }
+ if len(src) > 0 {
+ if debug {
+ println("ReadFrom: got space left in source:", len(src))
+ }
+ continue
+ }
+ err = e.nextBlock(false)
+ if err != nil {
+ return n, err
+ }
+ e.state.filling = e.state.filling[:e.o.blockSize]
+ src = e.state.filling
+ }
+}
+
+// Flush will send the currently written data to output
+// and block until everything has been written.
+// This should only be used on rare occasions where pushing the currently queued data is critical.
+func (e *Encoder) Flush() error {
+ s := &e.state
+ if len(s.filling) > 0 {
+ err := e.nextBlock(false)
+ if err != nil {
+ return err
+ }
+ }
+ s.wg.Wait()
+ s.wWg.Wait()
+ if s.err != nil {
+ return s.err
+ }
+ return s.writeErr
+}
+
+// Close will flush the final output and close the stream.
+// The function will block until everything has been written.
+// The Encoder can still be re-used after calling this.
+func (e *Encoder) Close() error {
+ s := &e.state
+ if s.encoder == nil {
+ return nil
+ }
+ err := e.nextBlock(true)
+ if err != nil {
+ return err
+ }
+ s.wg.Wait()
+ s.wWg.Wait()
+
+ if s.err != nil {
+ return s.err
+ }
+ if s.writeErr != nil {
+ return s.writeErr
+ }
+
+ // Write CRC
+ if e.o.crc && s.err == nil {
+ // heap alloc.
+ var tmp [4]byte
+ _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
+ s.nWritten += 4
+ }
+
+ // Add padding with content from crypto/rand.Reader
+ if s.err == nil && e.o.pad > 0 {
+ add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
+ frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
+ if err != nil {
+ return err
+ }
+ _, s.err = s.w.Write(frame)
+ }
+ return s.err
+}
+
+// EncodeAll will encode all input in src and append it to dst.
+// This function can be called concurrently, but each call will only run on a single goroutine.
+// If empty input is given, dst is returned unchanged.
+// Encoded blocks can be concatenated and the result will be the combined input stream.
+// Data compressed with EncodeAll can be decoded with the Decoder,
+// using either a stream or DecodeAll.
+func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ if len(src) == 0 {
+ return dst
+ }
+ e.init.Do(func() {
+ e.o.setDefault()
+ e.initialize()
+ })
+ enc := <-e.encoders
+ defer func() {
+ // Release encoder reference to last block.
+ enc.Reset()
+ e.encoders <- enc
+ }()
+ enc.Reset()
+ blk := enc.Block()
+ single := len(src) > 1<<20
+ if e.o.single != nil {
+ single = *e.o.single
+ }
+ fh := frameHeader{
+ ContentSize: uint64(len(src)),
+ WindowSize: uint32(enc.WindowSize(len(src))),
+ SingleSegment: single,
+ Checksum: e.o.crc,
+ DictID: 0,
+ }
+
+ // If less than 1MB, allocate a buffer up front.
+ if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 {
+ dst = make([]byte, 0, len(src))
+ }
+ dst, err := fh.appendTo(dst)
+ if err != nil {
+ panic(err)
+ }
+
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.reset(nil)
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := blk.encode()
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(todo)
+ blk.popOffsets()
+ case nil:
+ default:
+ panic(err)
+ }
+ dst = append(dst, blk.output...)
+ }
+ if e.o.crc {
+ dst = enc.AppendCRC(dst)
+ }
+ // Add padding with content from crypto/rand.Reader
+ if e.o.pad > 0 {
+ add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+ dst, err = skippableFrame(dst, add, rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return dst
+}
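A usage sketch (illustrative, not part of the file): because EncodeAll is safe for concurrent use and a nil writer is allowed when only block encoding is needed, a single long-lived Encoder can serve many callers. The names below are hypothetical.

var sharedEnc, _ = NewWriter(nil) // nil writer: block encoding only

func compressPayload(payload []byte) []byte {
	// dst is nil, so EncodeAll appends the frame to a fresh buffer.
	return sharedEnc.EncodeAll(payload, nil)
}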
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 000000000..6e210c4a0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,184 @@
+package zstd
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains accumulated state of multiple options.
+type encoderOptions struct {
+ concurrent int
+ crc bool
+ single *bool
+ pad int
+ blockSize int
+ windowSize int
+ level EncoderLevel
+}
+
+func (o *encoderOptions) setDefault() {
+ *o = encoderOptions{
+ // Use all available cores by default.
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: 1 << 16,
+ windowSize: 1 << 22,
+ level: SpeedDefault,
+ }
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+ switch o.level {
+ case SpeedDefault:
+ return &doubleFastEncoder{fastEncoder: fastEncoder{maxMatchOff: int32(o.windowSize)}}
+ case SpeedFastest:
+ return &fastEncoder{maxMatchOff: int32(o.windowSize)}
+ }
+ panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+ return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ o.pad = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
+ }
+ o.pad = n
+ return nil
+ }
+}
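An illustrative combination of the options above (values are arbitrary examples; the helper name is hypothetical):

func newPaddedWriter(w io.Writer) (*Encoder, error) {
	return NewWriter(w,
		WithEncoderCRC(true),      // append a 4-byte checksum
		WithEncoderConcurrency(2), // at most two concurrent encoders
		WithEncoderPadding(512),   // pad output to a multiple of 512 bytes
	)
}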
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change, and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+
+ // SpeedBetterCompression will (in the future) yield better compression than the default,
+ // but at approximately 4x the CPU usage of the default.
+ // For now this is not implemented.
+ SpeedBetterCompression = SpeedDefault
+
+ // SpeedBestCompression will choose the best available compression option.
+ // For now this is not implemented.
+ SpeedBestCompression = SpeedDefault
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+ for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+ if strings.EqualFold(s, l.String()) {
+ return true, l
+ }
+ }
+ return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return the encoder level that most closely matches
+// the compression ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+ switch {
+ case level < 3:
+ return SpeedFastest
+ case level >= 3:
+ return SpeedDefault
+ }
+ return SpeedDefault
+}
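An illustrative use of the two conversion helpers above (the function and its name are hypothetical): resolve a level from a user-supplied name, falling back to a numeric zstd level.

func resolveLevel(name string, zstdLevel int) EncoderLevel {
	if ok, lvl := EncoderLevelFromString(name); ok {
		return lvl
	}
	return EncoderLevelFromZstd(zstdLevel)
}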
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+ switch e {
+ case SpeedFastest:
+ return "fastest"
+ case SpeedDefault:
+ return "default"
+ default:
+ return "invalid"
+ }
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case l <= speedNotSet || l >= speedLast:
+ return fmt.Errorf("unknown encoder level")
+ }
+ o.level = l
+ return nil
+ }
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// To protect decoders from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame that requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 000000000..8fa264fc2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,473 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc hash.Hash64
+ frameDone sync.WaitGroup
+ offset int64
+
+ WindowSize uint64
+ DictionaryID uint32
+ FrameContentSize uint64
+ HasCheckSum bool
+ SingleSegment bool
+
+ // maxWindowSize is the maximum windows size to support.
+ // should never be bigger than max-int.
+ maxWindowSize uint64
+
+ // In order queue of blocks being decoded.
+ decoding chan *blockDec
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // asyncRunning indicates whether the async routine processes input on 'decoding'.
+ asyncRunning bool
+ asyncRunningMu sync.Mutex
+}
+
+const (
+ // The minimum Window_Size is 1 KB.
+ minWindowSize = 1 << 10
+)
+
+var (
+ frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+ skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ d := frameDec{
+ o: o,
+ maxWindowSize: 1 << 30,
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+ d.HasCheckSum = false
+ d.WindowSize = 0
+ var b []byte
+ for {
+ b = br.readSmall(4)
+ if b == nil {
+ return io.EOF
+ }
+ if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+ if debug {
+ println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic))
+ }
+ // Break if not skippable frame.
+ break
+ }
+ // Read size to skip
+ b = br.readSmall(4)
+ if b == nil {
+ println("Reading Frame Size EOF")
+ return io.ErrUnexpectedEOF
+ }
+ n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ println("Skipping frame with", n, "bytes.")
+ err := br.skipN(int(n))
+ if err != nil {
+ if debug {
+ println("Reading discarded frame", err)
+ }
+ return err
+ }
+ }
+ if !bytes.Equal(b, frameMagic) {
+ println("Got magic numbers: ", b, "want:", frameMagic)
+ return ErrMagicMismatch
+ }
+
+ // Read Frame_Header_Descriptor
+ fhd, err := br.readByte()
+ if err != nil {
+ println("Reading Frame_Header_Descriptor", err)
+ return err
+ }
+ d.SingleSegment = fhd&(1<<5) != 0
+
+ if fhd&(1<<3) != 0 {
+ return errors.New("Reserved bit set on frame header")
+ }
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+ d.WindowSize = 0
+ if !d.SingleSegment {
+ wd, err := br.readByte()
+ if err != nil {
+ println("Reading Window_Descriptor", err)
+ return err
+ }
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ windowLog := 10 + (wd >> 3)
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * uint64(wd&0x7)
+ d.WindowSize = windowBase + windowAdd
+ }
+
+ // Read Dictionary_ID
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+ d.DictionaryID = 0
+ if size := fhd & 3; size != 0 {
+ if size == 3 {
+ size = 4
+ }
+ b = br.readSmall(int(size))
+ if b == nil {
+ if debug {
+ println("Reading Dictionary_ID", io.ErrUnexpectedEOF)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ switch size {
+ case 1:
+ d.DictionaryID = uint32(b[0])
+ case 2:
+ d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+ case 4:
+ d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ }
+ if debug {
+ println("Dict size", size, "ID:", d.DictionaryID)
+ }
+ if d.DictionaryID != 0 {
+ return ErrUnknownDictionary
+ }
+ }
+
+ // Read Frame_Content_Size
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+ var fcsSize int
+ v := fhd >> 6
+ switch v {
+ case 0:
+ if d.SingleSegment {
+ fcsSize = 1
+ }
+ default:
+ fcsSize = 1 << v
+ }
+ d.FrameContentSize = 0
+ if fcsSize > 0 {
+ b := br.readSmall(fcsSize)
+ if b == nil {
+ println("Reading Frame content", io.ErrUnexpectedEOF)
+ return io.ErrUnexpectedEOF
+ }
+ switch fcsSize {
+ case 1:
+ d.FrameContentSize = uint64(b[0])
+ case 2:
+ // When FCS_Field_Size is 2, the offset of 256 is added.
+ d.FrameContentSize = (uint64(b[0]) | (uint64(b[1]) << 8)) + 256
+ case 4:
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+ case 8:
+ d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+ d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+ }
+ if debug {
+ println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]))
+ }
+ }
+ // Move this to shared.
+ d.HasCheckSum = fhd&(1<<2) != 0
+ if d.HasCheckSum {
+ if d.crc == nil {
+ d.crc = xxhash.New()
+ }
+ d.crc.Reset()
+ }
+
+ if d.WindowSize == 0 && d.SingleSegment {
+ // We may not need window in this case.
+ d.WindowSize = d.FrameContentSize
+ if d.WindowSize < minWindowSize {
+ d.WindowSize = minWindowSize
+ }
+ }
+
+ if d.WindowSize > d.maxWindowSize {
+ printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
+ return ErrWindowSizeExceeded
+ }
+ // The minimum Window_Size is 1 KB.
+ if d.WindowSize < minWindowSize {
+ println("got window size: ", d.WindowSize)
+ return ErrWindowSizeTooSmall
+ }
+ d.history.windowSize = int(d.WindowSize)
+ d.history.maxSize = d.history.windowSize + maxBlockSize
+ // history contains input - maybe we do something
+ d.rawInput = br
+ return nil
+}
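A worked example of the Window_Descriptor arithmetic above (illustrative, not part of the file): a descriptor byte of 0x19 carries exponent 3 and mantissa 1.

func exampleWindowSize() uint64 {
	wd := byte(0x19)
	windowLog := 10 + (wd >> 3)                    // 10 + 3 = 13
	windowBase := uint64(1) << windowLog           // 1 << 13 = 8192
	windowAdd := (windowBase / 8) * uint64(wd&0x7) // 1024 * 1 = 1024
	return windowBase + windowAdd                  // 9216 bytes
}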
+
+// next will start decoding the next block from stream.
+func (d *frameDec) next(block *blockDec) error {
+ println("decoding new block")
+ err := block.reset(d.rawInput, d.WindowSize)
+ if err != nil {
+ println("block error:", err)
+ // Signal the frame decoder we have a problem.
+ d.sendErr(block, err)
+ return err
+ }
+ block.input <- struct{}{}
+ if debug {
+ println("next block:", block)
+ }
+ d.asyncRunningMu.Lock()
+ defer d.asyncRunningMu.Unlock()
+ if !d.asyncRunning {
+ return nil
+ }
+ if block.Last {
+ // We indicate the frame is done by returning io.EOF.
+ d.decoding <- block
+ return io.EOF
+ }
+ d.decoding <- block
+ return nil
+}
+
+// sendErr will queue an error block on the frame.
+// This will cause the frame decoder to return when it encounters the block.
+// Returns true if the decoder was added.
+func (d *frameDec) sendErr(block *blockDec, err error) bool {
+ d.asyncRunningMu.Lock()
+ defer d.asyncRunningMu.Unlock()
+ if !d.asyncRunning {
+ return false
+ }
+
+ println("sending error", err.Error())
+ block.sendErr(err)
+ d.decoding <- block
+ return true
+}
+
+// checkCRC will check the checksum if the frame has one.
+// Will return ErrCRCMismatch if crc check failed, otherwise nil.
+func (d *frameDec) checkCRC() error {
+ if !d.HasCheckSum {
+ return nil
+ }
+ var tmp [8]byte
+ gotB := d.crc.Sum(tmp[:0])
+ // Flip to match file order.
+ gotB[0] = gotB[7]
+ gotB[1] = gotB[6]
+ gotB[2] = gotB[5]
+ gotB[3] = gotB[4]
+
+ // We can overwrite upper tmp now
+ want := d.rawInput.readSmall(4)
+ if want == nil {
+ println("CRC missing?")
+ return io.ErrUnexpectedEOF
+ }
+
+ if !bytes.Equal(gotB[:4], want) {
+ println("CRC Check Failed:", gotB[:4], "!=", want)
+ return ErrCRCMismatch
+ }
+ println("CRC ok")
+ return nil
+}
+
+func (d *frameDec) initAsync() {
+ if !d.o.lowMem && !d.SingleSegment {
+ // set max extra size history to 20MB.
+ d.history.maxSize = d.history.windowSize + maxBlockSize*10
+ }
+ // re-alloc if more than one extra block size.
+ if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
+ d.history.b = make([]byte, 0, d.history.maxSize)
+ }
+ if cap(d.history.b) < d.history.maxSize {
+ d.history.b = make([]byte, 0, d.history.maxSize)
+ }
+ if cap(d.decoding) < d.o.concurrent {
+ d.decoding = make(chan *blockDec, d.o.concurrent)
+ }
+ if debug {
+ h := d.history
+ printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
+ }
+ d.asyncRunningMu.Lock()
+ d.asyncRunning = true
+ d.asyncRunningMu.Unlock()
+}
+
+// startDecoder will start decoding blocks and write them to the writer.
+// The decoder will stop as soon as an error occurs or at end of frame.
+// When the frame has finished decoding, the frameDec.frameDone
+// wait group is signalled.
+func (d *frameDec) startDecoder(output chan decodeOutput) {
+ // TODO: Init to dictionary
+ d.history.reset()
+ written := int64(0)
+
+ defer func() {
+ d.asyncRunningMu.Lock()
+ d.asyncRunning = false
+ d.asyncRunningMu.Unlock()
+
+ // Drain the currently decoding.
+ d.history.error = true
+ flushdone:
+ for {
+ select {
+ case b := <-d.decoding:
+ b.history <- &d.history
+ output <- <-b.result
+ default:
+ break flushdone
+ }
+ }
+ println("frame decoder done, signalling done")
+ d.frameDone.Done()
+ }()
+ // Get decoder for first block.
+ block := <-d.decoding
+ block.history <- &d.history
+ for {
+ var next *blockDec
+ // Get result
+ r := <-block.result
+ if r.err != nil {
+ println("Result contained error", r.err)
+ output <- r
+ return
+ }
+ if debug {
+ println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
+ d.offset += int64(len(r.b))
+ }
+ if !block.Last {
+ // Send history to next block
+ select {
+ case next = <-d.decoding:
+ if debug {
+ println("Sending ", len(d.history.b), "bytes as history")
+ }
+ next.history <- &d.history
+ default:
+ // Wait until we have sent the block, so
+ // other decoders can potentially get the decoder.
+ next = nil
+ }
+ }
+
+ // Add checksum, async to decoding.
+ if d.HasCheckSum {
+ n, err := d.crc.Write(r.b)
+ if err != nil {
+ r.err = err
+ if n != len(r.b) {
+ r.err = io.ErrShortWrite
+ }
+ output <- r
+ return
+ }
+ }
+ written += int64(len(r.b))
+ if d.SingleSegment && uint64(written) > d.FrameContentSize {
+ r.err = ErrFrameSizeExceeded
+ output <- r
+ return
+ }
+ if block.Last {
+ r.err = d.checkCRC()
+ output <- r
+ return
+ }
+ output <- r
+ if next == nil {
+ // There was no decoder available, so we wait for one now that we have sent to the writer.
+ if debug {
+ println("Sending ", len(d.history.b), " bytes as history")
+ }
+ next = <-d.decoding
+ next.history <- &d.history
+ }
+ block = next
+ }
+}
+
+// runDecoder will run a synchronous decoder that decodes blocks of data.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+ // TODO: Init to dictionary
+ d.history.reset()
+ saved := d.history.b
+
+ // We use the history for output to avoid copying it.
+ d.history.b = dst
+ // Store input length, so we only check new data.
+ crcStart := len(dst)
+ var err error
+ for {
+ err = dec.reset(d.rawInput, d.WindowSize)
+ if err != nil {
+ break
+ }
+ if debug {
+ println("next block:", dec)
+ }
+ err = dec.decodeBuf(&d.history)
+ if err != nil || dec.Last {
+ break
+ }
+ if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ err = ErrFrameSizeExceeded
+ break
+ }
+ }
+ dst = d.history.b
+ if err == nil {
+ if d.HasCheckSum {
+ var n int
+ n, err = d.crc.Write(dst[crcStart:])
+ if err == nil {
+ if n != len(dst)-crcStart {
+ err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
+ }
+ }
+ }
+ }
+ d.history.b = saved
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 000000000..acac32527
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,118 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+)
+
+type frameHeader struct {
+ ContentSize uint64
+ WindowSize uint32
+ SingleSegment bool
+ Checksum bool
+ DictID uint32 // Not stored.
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+ dst = append(dst, frameMagic...)
+ var fhd uint8
+ if f.Checksum {
+ fhd |= 1 << 2
+ }
+ if f.SingleSegment {
+ fhd |= 1 << 5
+ }
+ var fcs uint8
+ if f.ContentSize >= 256 {
+ fcs++
+ }
+ if f.ContentSize >= 65536+256 {
+ fcs++
+ }
+ if f.ContentSize >= 0xffffffff {
+ fcs++
+ }
+ fhd |= fcs << 6
+
+ dst = append(dst, fhd)
+ if !f.SingleSegment {
+ const winLogMin = 10
+ windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+ dst = append(dst, uint8(windowLog))
+ }
+ if f.SingleSegment && f.ContentSize == 0 {
+ return nil, errors.New("single segment, but no size set")
+ }
+ switch fcs {
+ case 0:
+ if f.SingleSegment {
+ dst = append(dst, uint8(f.ContentSize))
+ }
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
+ case 1:
+ f.ContentSize -= 256
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+ case 2:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+ case 3:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+ uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+ default:
+ panic("invalid fcs")
+ }
+ return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// is divisible by wantMultiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
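A worked example (illustrative; the function name is hypothetical): padding 100 written bytes to a multiple of 64.

func examplePadding() int {
	// 100 % 64 leaves 36, so 64-36 = 28 bytes must be added.
	// 28 >= skippableFrameHeader (8), so no extra multiple is needed;
	// the padded total becomes 128 bytes.
	return calcSkippableFrame(100, 64) // == 28
}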
+
+// skippableFrame will add a skippable frame with a total size of bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+ }
+ if int64(total) > math.MaxUint32 {
+ return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+ }
+ dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+ f := uint32(total - skippableFrameHeader)
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 000000000..a86d00bc3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,337 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+const (
+ tablelogAbsoluteMax = 9
+)
+
+const (
+ /*!MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+ maxMemoryUsage = 11
+
+ maxTableLog = maxMemoryUsage - 2
+ maxTablesize = 1 << maxTableLog
+ maxTableMask = (1 << maxTableLog) - 1
+ minTablelog = 5
+ maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for compression and decompression.
+type fseDecoder struct {
+ dt [maxTablesize]decSymbol // Decompression table.
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ maxBits uint8 // Maximum number of additional bits
+
+ // used for table creation to avoid allocations.
+ stateTable [256]uint16
+ norm [maxSymbolValue + 1]int16
+ preDefined bool
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+ return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+ var (
+ charnum uint16
+ previous0 bool
+ )
+ if b.remain() < 4 {
+ return errors.New("input too small")
+ }
+ bitStream := b.Uint32()
+ nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+ if nbBits > tablelogAbsoluteMax {
+ println("Invalid tablelog:", nbBits)
+ return errors.New("tableLog too large")
+ }
+ bitStream >>= 4
+ bitCount := uint(4)
+
+ s.actualTableLog = uint8(nbBits)
+ remaining := int32((1 << nbBits) + 1)
+ threshold := int32(1 << nbBits)
+ gotTotal := int32(0)
+ nbBits++
+
+ for remaining > 1 && charnum <= maxSymbol {
+ if previous0 {
+ //println("prev0")
+ n0 := charnum
+ for (bitStream & 0xFFFF) == 0xFFFF {
+ //println("24 x 0")
+ n0 += 24
+ if r := b.remain(); r > 5 {
+ b.advance(2)
+ bitStream = b.Uint32() >> bitCount
+ } else {
+ // end of bit stream
+ bitStream >>= 16
+ bitCount += 16
+ }
+ }
+ //printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+ for (bitStream & 3) == 3 {
+ n0 += 3
+ bitStream >>= 2
+ bitCount += 2
+ }
+ n0 += uint16(bitStream & 3)
+ bitCount += 2
+
+ if n0 > maxSymbolValue {
+ return errors.New("maxSymbolValue too small")
+ }
+ //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+ for charnum < n0 {
+ s.norm[uint8(charnum)] = 0
+ charnum++
+ }
+
+ if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ bitStream = b.Uint32() >> bitCount
+ } else {
+ bitStream >>= 2
+ }
+ }
+
+ max := (2*threshold - 1) - remaining
+ var count int32
+
+ if int32(bitStream)&(threshold-1) < max {
+ count = int32(bitStream) & (threshold - 1)
+ if debug && nbBits < 1 {
+ panic("nbBits underflow")
+ }
+ bitCount += nbBits - 1
+ } else {
+ count = int32(bitStream) & (2*threshold - 1)
+ if count >= threshold {
+ count -= max
+ }
+ bitCount += nbBits
+ }
+
+ // extra accuracy
+ count--
+ if count < 0 {
+ // -1 means +1
+ remaining += count
+ gotTotal -= count
+ } else {
+ remaining -= count
+ gotTotal += count
+ }
+ s.norm[charnum&0xff] = int16(count)
+ charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ //println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "remain:", b.remain())
+ if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ //println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "iend", iend)
+ }
+ bitStream = b.Uint32() >> (bitCount & 31)
+ //printf("bitstream is now: 0b%b", bitStream)
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ // println(s.norm[:s.symbolLen], s.symbolLen)
+ return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+ newState uint16
+ addBits uint8 // Used for symbols until transformed.
+ nbBits uint8
+ baseline uint32
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return decSymbol{}, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return decSymbol{
+ addBits: lu.addBits,
+ baseline: lu.baseLine,
+ }, nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+ s.actualTableLog = 0
+ s.maxBits = symbol.addBits
+ s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].addBits = uint8(i)
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].addBits = uint8(ss)
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].nbBits = nBits
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].newState = newState
+ }
+ }
+ return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+ tableSize := uint16(1 << s.actualTableLog)
+ s.maxBits = 0
+ for i, v := range s.dt[:tableSize] {
+ if int(v.addBits) >= len(t) {
+ return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits, len(t))
+ }
+ lu := t[v.addBits]
+ if lu.addBits > s.maxBits {
+ s.maxBits = lu.addBits
+ }
+ s.dt[i&maxTableMask] = decSymbol{
+ newState: v.newState,
+ nbBits: v.nbBits,
+ addBits: lu.addBits,
+ baseline: lu.baseLine,
+ }
+ }
+ return nil
+}
+
+type fseState struct {
+ // TODO: Check if *[1 << maxTablelog]decSymbol is faster.
+ dt []decSymbol
+ state decSymbol
+}
+
+// init initializes the state and decodes the first state and symbol.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+ s.dt = dt
+ br.fill()
+ s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+ lowBits := uint16(br.getBits(s.state.nbBits))
+ s.state = s.dt[s.state.newState+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+ return br.finished() && s.state.nbBits > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+ return int(s.state.baseline), s.state.addBits
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+ lowBits := uint16(br.getBitsFast(s.state.nbBits))
+ s.state = s.dt[s.state.newState+lowBits]
+ return s.state.baseline, s.state.addBits
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 000000000..dfa6cf7ce
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,717 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+)
+
+const (
+ // For encoding we only support table logs up to maxEncTableLog.
+ maxEncTableLog = 8
+ maxEncTablesize = 1 << maxTableLog
+ maxEncTableMask = (1 << maxTableLog) - 1
+ minEncTablelog = 5
+ maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression and decompression.
+type fseEncoder struct {
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ ct cTable // Compression tables.
+ maxCount int // count of the most probable symbol
+ zeroBits bool // set if a symbol has prob > 50%, so zero-bit encodings can occur.
+ clearCount bool // clear count
+ useRLE bool // This encoder is for RLE
+ preDefined bool // This encoder is predefined.
+ reUsed bool // Set to know when the encoder has been reused.
+ rleVal uint8 // RLE Symbol
+ maxBits uint8 // Maximum output bits after transform.
+
+ // TODO: Technically zstd should be fine with 64 bytes.
+ count [256]uint32
+ norm [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+ tableSymbol []byte
+ stateTable []uint16
+ symbolTT []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+ deltaNbBits uint32
+ deltaFindState int16
+ outBits uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+ return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the histogram, which allows the caller to populate it
+// and skip that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+ return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+ s.maxCount = maxCount
+ s.symbolLen = uint16(maxSymbol) + 1
+ s.clearCount = maxCount != 0
+}
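An illustrative sketch of the pre-populated histogram path described above (the helper and its name are hypothetical, not part of the file):

func fillHistogram(s *fseEncoder, data []byte) {
	// Count symbols directly into the encoder's histogram.
	hist := s.Histogram()
	for _, b := range data {
		hist[b]++
	}
	// Report the highest set symbol and the largest single count.
	var maxSym uint8
	var maxCount int
	for i, c := range hist {
		if c > 0 {
			maxSym = uint8(i)
		}
		if int(c) > maxCount {
			maxCount = int(c)
		}
	}
	s.HistogramFinished(maxSym, maxCount)
}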
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+ if s == nil {
+ s = &fseEncoder{}
+ }
+ s.useRLE = false
+ if s.clearCount && s.maxCount == 0 {
+ for i := range s.count {
+ s.count[i] = 0
+ }
+ s.clearCount = false
+ }
+ return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+ tableSize := 1 << s.actualTableLog
+ // get tableSymbol that is big enough.
+ if cap(s.ct.tableSymbol) < int(tableSize) {
+ s.ct.tableSymbol = make([]byte, tableSize)
+ }
+ s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+ ctSize := tableSize
+ if cap(s.ct.stateTable) < ctSize {
+ s.ct.stateTable = make([]uint16, ctSize)
+ }
+ s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+ if cap(s.ct.symbolTT) < 256 {
+ s.ct.symbolTT = make([]symbolTransform, 256)
+ }
+ s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ var cumul [256]int16
+
+ s.allocCtable()
+ tableSymbol := s.ct.tableSymbol[:tableSize]
+ // symbol start positions
+ {
+ cumul[0] = 0
+ for ui, v := range s.norm[:s.symbolLen-1] {
+ u := byte(ui) // one less than reference
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = u
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ }
+ // Encode last symbol separately to avoid overflowing u
+ u := int(s.symbolLen - 1)
+ v := s.norm[s.symbolLen-1]
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = byte(u)
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ if uint32(cumul[s.symbolLen]) != tableSize {
+ return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+ }
+ cumul[s.symbolLen] = int16(tableSize) + 1
+ }
+ // Spread symbols
+ s.zeroBits = false
+ {
+ step := tableStep(tableSize)
+ tableMask := tableSize - 1
+ var position uint32
+ // if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1))
+ for ui, v := range s.norm[:s.symbolLen] {
+ symbol := byte(ui)
+ if v > largeLimit {
+ s.zeroBits = true
+ }
+ for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+ tableSymbol[position] = symbol
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ position = (position + step) & tableMask
+ } /* Low proba area */
+ }
+ }
+
+ // Check if we have gone through all positions
+ if position != 0 {
+ return errors.New("position!=0")
+ }
+ }
+
+ // Build table
+ table := s.ct.stateTable
+ {
+ tsi := int(tableSize)
+ for u, v := range tableSymbol {
+ // TableU16 : sorted by symbol order; gives next state value
+ table[cumul[v]] = uint16(tsi + u)
+ cumul[v]++
+ }
+ }
+
+ // Build Symbol Transformation Table
+ {
+ total := int16(0)
+ symbolTT := s.ct.symbolTT[:s.symbolLen]
+ tableLog := s.actualTableLog
+ tl := (uint32(tableLog) << 16) - (1 << tableLog)
+ for i, v := range s.norm[:s.symbolLen] {
+ switch v {
+ case 0:
+ case -1, 1:
+ symbolTT[i].deltaNbBits = tl
+ symbolTT[i].deltaFindState = int16(total - 1)
+ total++
+ default:
+ maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+ minStatePlus := uint32(v) << maxBitsOut
+ symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+ symbolTT[i].deltaFindState = int16(total - v)
+ total += v
+ }
+ }
+ if total != int16(tableSize) {
+ return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+ }
+ }
+ return nil
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+ s.allocCtable()
+ s.actualTableLog = 0
+ s.ct.stateTable = s.ct.stateTable[:1]
+ s.ct.symbolTT[val] = symbolTransform{
+ deltaFindState: 0,
+ deltaNbBits: 0,
+ }
+ if debug {
+ println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+ }
+ s.rleVal = val
+ s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// if nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+ if s.reUsed || s.preDefined {
+ return
+ }
+ if s.useRLE {
+ if transform == nil {
+ s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+ s.maxBits = s.rleVal
+ return
+ }
+ s.maxBits = transform[s.rleVal]
+ s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+ return
+ }
+ if transform == nil {
+ for i := range s.ct.symbolTT[:s.symbolLen] {
+ s.ct.symbolTT[i].outBits = uint8(i)
+ }
+ s.maxBits = uint8(s.symbolLen - 1)
+ return
+ }
+ s.maxBits = 0
+ for i, v := range transform[:s.symbolLen] {
+ s.ct.symbolTT[i].outBits = v
+ if v > s.maxBits {
+ // We could assume bits always going up, but we play safe.
+ s.maxBits = v
+ }
+ }
+}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.
+func (s *fseEncoder) normalizeCount(length int) error {
+ if s.reUsed {
+ return nil
+ }
+ s.optimalTableLog(length)
+ var (
+ tableLog = s.actualTableLog
+ scale = 62 - uint64(tableLog)
+ step = (1 << 62) / uint64(length)
+ vStep = uint64(1) << (scale - 20)
+ stillToDistribute = int16(1 << tableLog)
+ largest int
+ largestP int16
+ lowThreshold = (uint32)(length >> tableLog)
+ )
+ if s.maxCount == length {
+ s.useRLE = true
+ return nil
+ }
+ s.useRLE = false
+ for i, cnt := range s.count[:s.symbolLen] {
+ // already handled
+ // if (count[s] == s.length) return 0; /* rle special case */
+
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ stillToDistribute--
+ } else {
+ proba := (int16)((uint64(cnt) * step) >> scale)
+ if proba < 8 {
+ restToBeat := vStep * uint64(rtbTable[proba])
+ v := uint64(cnt)*step - (uint64(proba) << scale)
+ if v > restToBeat {
+ proba++
+ }
+ }
+ if proba > largestP {
+ largestP = proba
+ largest = i
+ }
+ s.norm[i] = proba
+ stillToDistribute -= proba
+ }
+ }
+
+ if -stillToDistribute >= (s.norm[largest] >> 1) {
+ // corner case, need another normalization method
+ err := s.normalizeCount2(length)
+ if err != nil {
+ return err
+ }
+ if debug {
+ err = s.validateNorm()
+ if err != nil {
+ return err
+ }
+ }
+ return s.buildCTable()
+ }
+ s.norm[largest] += stillToDistribute
+ if debug {
+ err := s.validateNorm()
+ if err != nil {
+ return err
+ }
+ }
+ return s.buildCTable()
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *fseEncoder) normalizeCount2(length int) error {
+ const notYetAssigned = -2
+ var (
+ distributed uint32
+ total = uint32(length)
+ tableLog = s.actualTableLog
+ lowThreshold = uint32(total >> tableLog)
+ lowOne = uint32((total * 3) >> (tableLog + 1))
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ distributed++
+ total -= cnt
+ continue
+ }
+ if cnt <= lowOne {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ s.norm[i] = notYetAssigned
+ }
+ toDistribute := (1 << tableLog) - distributed
+
+ if (total / toDistribute) > lowOne {
+ // risk of rounding to zero
+ lowOne = uint32((total * 3) / (toDistribute * 2))
+ for i, cnt := range s.count[:s.symbolLen] {
+ if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ }
+ toDistribute = (1 << tableLog) - distributed
+ }
+ if distributed == uint32(s.symbolLen)+1 {
+ // all values are pretty poor;
+ // probably incompressible data (should have already been detected);
+ // find max, then give all remaining points to max
+ var maxV int
+ var maxC uint32
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt > maxC {
+ maxV = i
+ maxC = cnt
+ }
+ }
+ s.norm[maxV] += int16(toDistribute)
+ return nil
+ }
+
+ if total == 0 {
+ // all of the symbols were low enough for the lowOne or lowThreshold
+ for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+ if s.norm[i] > 0 {
+ toDistribute--
+ s.norm[i]++
+ }
+ }
+ return nil
+ }
+
+ var (
+ vStepLog = 62 - uint64(tableLog)
+ mid = uint64((1 << (vStepLog - 1)) - 1)
+ rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+ tmpTotal = mid
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if s.norm[i] == notYetAssigned {
+ var (
+ end = tmpTotal + uint64(cnt)*rStep
+ sStart = uint32(tmpTotal >> vStepLog)
+ sEnd = uint32(end >> vStepLog)
+ weight = sEnd - sStart
+ )
+ if weight < 1 {
+ return errors.New("weight < 1")
+ }
+ s.norm[i] = int16(weight)
+ tmpTotal = end
+ }
+ }
+ return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+ tableLog := uint8(maxEncTableLog)
+ minBitsSrc := highBit(uint32(length)) + 1
+ minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+ minBits := uint8(minBitsSymbols)
+ if minBitsSrc < minBitsSymbols {
+ minBits = uint8(minBitsSrc)
+ }
+
+ maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minEncTablelog {
+ tableLog = minEncTablelog
+ }
+ if tableLog > maxEncTableLog {
+ tableLog = maxEncTableLog
+ }
+ s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+ }
+ for i, v := range s.count[s.symbolLen:] {
+ if v != 0 {
+ return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+ }
+ }
+ return nil
+}
+
+// writeCount will write the normalized histogram count to the header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ var (
+ tableLog = s.actualTableLog
+ tableSize = 1 << tableLog
+ previous0 bool
+ charnum uint16
+
+ maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minEncTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ )
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ if s.preDefined || s.reUsed {
+ // Never write predefined.
+ return out, nil
+ }
+ outP := len(out)
+ out = out[:outP+maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+ }
+ bitStream += uint32(count) << bitCount
+ bitCount += nbBits
+ if count < max {
+ bitCount--
+ }
+
+ previous0 = count == 1
+ if remaining < 1 {
+ return nil, errors.New("internal error: remaining < 1")
+ }
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+ return nil, errors.New("internal error: charnum > s.symbolLen")
+ }
+ return out[:outP], nil
+}
+
+// bitCost returns the approximate symbol cost, as a fractional value, using fixed-point format (accuracyLog fractional bits).
+// note 1: assumes symbolValue is valid (<= maxSymbolValue).
+// note 2: if freq[symbolValue] == 0, a fake cost of tableLog+1 bits is returned.
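+// For example, with accuracyLog == 8, a return value of 384 corresponds to a cost of 384/2^8 = 1.5 bits.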
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+ minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+ threshold := (minNbBits + 1) << 16
+ if debug {
+ if !(s.actualTableLog < 16) {
+ panic("!s.actualTableLog < 16")
+ }
+ // ensure enough room for renormalization double shift
+ if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+ panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+ }
+ }
+ tableSize := uint32(1) << s.actualTableLog
+ deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+ // linear interpolation (very approximate)
+ normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+ bitMultiplier := uint32(1) << accuracyLog
+ if debug {
+ if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+ panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+ }
+ if normalizedDeltaFromThreshold > bitMultiplier {
+ panic("normalizedDeltaFromThreshold > bitMultiplier")
+ }
+ }
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// approxSize returns the cost in bits of encoding the distribution in hist using the ctable.
+// The histogram should only extend up to the last non-zero symbol.
+// Returns math.MaxUint32 if the ctable cannot represent all the symbols in hist.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+ if int(s.symbolLen) < len(hist) {
+ // More symbols than we have.
+ return math.MaxUint32
+ }
+ if s.useRLE {
+ // We will never reuse RLE encoders.
+ return math.MaxUint32
+ }
+ const kAccuracyLog = 8
+ badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+ var cost uint32
+ for i, v := range hist {
+ if v == 0 {
+ continue
+ }
+ if s.norm[i] == 0 {
+ return math.MaxUint32
+ }
+ bitCost := s.bitCost(uint8(i), kAccuracyLog)
+ if bitCost > badCost {
+ return math.MaxUint32
+ }
+ cost += v * bitCost
+ }
+ return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+ if s.preDefined {
+ return 0
+ }
+ if s.useRLE {
+ return 8
+ }
+ return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+ bw *bitWriter
+ stateTable []uint16
+ state uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+ c.bw = bw
+ c.stateTable = ct.stateTable
+ if len(c.stateTable) == 1 {
+ // RLE
+ c.stateTable[0] = uint16(0)
+ c.state = 0
+ return
+ }
+ nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+ im := int32((nbBitsOut << 16) - first.deltaNbBits)
+ lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+ c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+ nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+ dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+ c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+ c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+ c.bw.flush32()
+ c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 000000000..5186de802
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,153 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "math"
+)
+
+var (
+ // fsePredef are the predefined fse tables as defined here:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ // These values are already transformed.
+ fsePredef [3]fseDecoder
+
+ // fsePredefEnc are the predefined encoder based on fse tables as defined here:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ // These values are already transformed.
+ fsePredefEnc [3]fseEncoder
+
+ // symbolTableX contain the transformations needed for each type as defined in
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+ symbolTableX [3][]baseOffset
+
+ // maxTableSymbol is the biggest supported symbol for each table type
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+ maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+ // bitTables is the bits table for each table.
+ bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+ // indexes for fsePredef and symbolTableX
+ tableLiteralLengths tableIndex = 0
+ tableOffsets tableIndex = 1
+ tableMatchLengths tableIndex = 2
+
+ maxLiteralLengthSymbol = 35
+ maxOffsetLengthSymbol = 30
+ maxMatchLengthSymbol = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+ baseLine uint32
+ addBits uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+ if len(bits) != len(dst) {
+ panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+ }
+ for i, bit := range bits {
+ if base > math.MaxInt32 {
+			panic("invalid decoding table, base overflows int32")
+ }
+
+ dst[i] = baseOffset{
+ baseLine: base,
+ addBits: bit,
+ }
+ base += 1 << bit
+ }
+}
+
+func init() {
+ // Literals length codes
+ tmp := make([]baseOffset, 36)
+ for i := range tmp[:16] {
+ tmp[i] = baseOffset{
+ baseLine: uint32(i),
+ addBits: 0,
+ }
+ }
+ fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableLiteralLengths] = tmp
+
+ // Match length codes
+ tmp = make([]baseOffset, 53)
+ for i := range tmp[:32] {
+ tmp[i] = baseOffset{
+			// The transformation adds the minimum match length of 3.
+ baseLine: uint32(i) + 3,
+ addBits: 0,
+ }
+ }
+ fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableMatchLengths] = tmp
+
+ // Offset codes
+ tmp = make([]baseOffset, maxOffsetBits+1)
+ tmp[1] = baseOffset{
+ baseLine: 1,
+ addBits: 1,
+ }
+ fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+ symbolTableX[tableOffsets] = tmp
+
+ // Fill predefined tables and transform them.
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ for i := range fsePredef[:] {
+ f := &fsePredef[i]
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1})
+ f.symbolLen = 36
+ case tableOffsets:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+ f.actualTableLog = 5
+ copy(f.norm[:], []int16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+ f.symbolLen = 29
+ case tableMatchLengths:
+ //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1})
+ f.symbolLen = 53
+ }
+ if err := f.buildDtable(); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ if err := f.transform(symbolTableX[i]); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ f.preDefined = true
+
+ // Create encoder as well
+ enc := &fsePredefEnc[i]
+ copy(enc.norm[:], f.norm[:])
+ enc.symbolLen = f.symbolLen
+ enc.actualTableLog = f.actualTableLog
+ if err := enc.buildCTable(); err != nil {
+ panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+ }
+ enc.setBits(bitTables[i])
+ enc.preDefined = true
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 000000000..819d87f88
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,77 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u to fit in a hash table with hashLog bits.
+// mls must be >= 4 and <= 8; any other value returns the hash of the lowest 4 bytes.
+// hashLog should always be < 32.
+// Preferably hashLog and mls should be constants.
+// FIXME: the switch does NOT get resolved at compile time, even if 'mls' is constant,
+// so this cannot be used.
+func hashLen(u uint64, hashLog, mls uint8) uint32 {
+ switch mls {
+ case 5:
+ return hash5(u, hashLog)
+ case 6:
+ return hash6(u, hashLog)
+ case 7:
+ return hash7(u, hashLog)
+ case 8:
+ return hash8(u, hashLog)
+ default:
+ return hash4x64(u, hashLog)
+ }
+}
+
+// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash3(u uint32, h uint8) uint32 {
+ return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 000000000..e8c419bd5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,73 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+ b []byte
+ huffTree *huff0.Scratch
+ recentOffsets [3]int
+ decoders sequenceDecs
+ windowSize int
+ maxSize int
+ error bool
+}
+
+// reset will reset the history to initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+ h.b = h.b[:0]
+ h.error = false
+ h.recentOffsets = [3]int{1, 4, 8}
+ if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ }
+ if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ }
+ if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ }
+ h.decoders = sequenceDecs{}
+ if h.huffTree != nil {
+ huffDecoderPool.Put(h.huffTree)
+ }
+ h.huffTree = nil
+ //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+// append bytes to history.
+// This function will make sure there is space for it,
+// assuming the buffer has been allocated with enough extra space.
+func (h *history) append(b []byte) {
+ if len(b) >= h.windowSize {
+ // Discard all history by simply overwriting
+ h.b = h.b[:h.windowSize]
+ copy(h.b, b[len(b)-h.windowSize:])
+ return
+ }
+
+ // If there is space, append it.
+ if len(b) < cap(h.b)-len(h.b) {
+ h.b = append(h.b, b...)
+ return
+ }
+
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(b) + len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+ copy(h.b[h.windowSize-len(b):], b)
+}
+
+// append bytes to history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+ h.b = append(h.b, b...)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
new file mode 100644
index 000000000..24b53065f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
new file mode 100644
index 000000000..69aa3bb58
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -0,0 +1,58 @@
+# xxhash
+
+VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
+
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
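+
+For example, the one-shot and streaming forms produce the same value (a quick
+sketch using only the functions listed above):
+
+```
+b := []byte("hello, world")
+sum1 := xxhash.Sum64(b)
+
+d := xxhash.New()
+d.Write(b)
+sum2 := d.Sum64() // sum2 == sum1
+```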
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
new file mode 100644
index 000000000..426b9cac7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -0,0 +1,238 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+ prime1v = prime1
+ prime2v = prime2
+ prime3v = prime3
+ prime4v = prime4
+ prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ b = b[len(d.mem):]
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
new file mode 100644
index 000000000..35318d7c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(*Digest, []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
new file mode 100644
index 000000000..d580e32ae
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX h
+// CX pointer to advance through b
+// DX n
+// BX loop end
+// R8 v1, k1
+// R9 v2
+// R10 v3
+// R11 v4
+// R12 tmp
+// R13 prime1v
+// R14 prime2v
+// R15 prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+ MOVQ (CX), R12 \
+ ADDQ $8, CX \
+ IMULQ R14, R12 \
+ ADDQ R12, r \
+ ROLQ $31, r \
+ IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+#define mergeRound(acc, val) \
+ IMULQ R14, val \
+ ROLQ $31, val \
+ IMULQ R13, val \
+ XORQ val, acc \
+ IMULQ R13, acc \
+ ADDQ R15, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+ // Load fixed primes.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+ MOVQ ·prime4v(SB), R15
+
+ // Load slice.
+ MOVQ b_base+0(FP), CX
+ MOVQ b_len+8(FP), DX
+ LEAQ (CX)(DX*1), BX
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, BX
+
+ // Check whether we have at least one block.
+ CMPQ DX, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ R13, R8
+ ADDQ R14, R8
+ MOVQ R14, R9
+ XORQ R10, R10
+ XORQ R11, R11
+ SUBQ R13, R11
+
+ // Loop until CX > BX.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ MOVQ R8, AX
+ ROLQ $1, AX
+ MOVQ R9, R12
+ ROLQ $7, R12
+ ADDQ R12, AX
+ MOVQ R10, R12
+ ROLQ $12, R12
+ ADDQ R12, AX
+ MOVQ R11, R12
+ ROLQ $18, R12
+ ADDQ R12, AX
+
+ mergeRound(AX, R8)
+ mergeRound(AX, R9)
+ mergeRound(AX, R10)
+ mergeRound(AX, R11)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+ ADDQ DX, AX
+
+ // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ ADDQ $24, BX
+
+ CMPQ CX, BX
+ JG fourByte
+
+wordLoop:
+ // Calculate k1.
+ MOVQ (CX), R8
+ ADDQ $8, CX
+ IMULQ R14, R8
+ ROLQ $31, R8
+ IMULQ R13, R8
+
+ XORQ R8, AX
+ ROLQ $27, AX
+ IMULQ R13, AX
+ ADDQ R15, AX
+
+ CMPQ CX, BX
+ JLE wordLoop
+
+fourByte:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JG singles
+
+ MOVL (CX), R8
+ ADDQ $4, CX
+ IMULQ R13, R8
+ XORQ R8, AX
+
+ ROLQ $23, AX
+ IMULQ R14, AX
+ ADDQ ·prime3v(SB), AX
+
+singles:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JGE finalize
+
+singlesLoop:
+ MOVBQZX (CX), R12
+ ADDQ $1, CX
+ IMULQ ·prime5v(SB), R12
+ XORQ R12, AX
+
+ ROLQ $11, AX
+ IMULQ R13, AX
+
+ CMPQ CX, BX
+ JL singlesLoop
+
+finalize:
+ MOVQ AX, R12
+ SHRQ $33, R12
+ XORQ R12, AX
+ IMULQ R14, AX
+ MOVQ AX, R12
+ SHRQ $29, R12
+ XORQ R12, AX
+ IMULQ ·prime3v(SB), AX
+ MOVQ AX, R12
+ SHRQ $32, R12
+ XORQ R12, AX
+
+ MOVQ AX, ret+24(FP)
+ RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+
+ // Load slice.
+ MOVQ b_base+8(FP), CX
+ MOVQ b_len+16(FP), DX
+ LEAQ (CX)(DX*1), BX
+ SUBQ $32, BX
+
+ // Load vN from d.
+ MOVQ d+0(FP), AX
+ MOVQ 0(AX), R8 // v1
+ MOVQ 8(AX), R9 // v2
+ MOVQ 16(AX), R10 // v3
+ MOVQ 24(AX), R11 // v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ // Copy vN back to d.
+ MOVQ R8, 0(AX)
+ MOVQ R9, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R11, 24(AX)
+
+ // The number of bytes written is CX minus the old base pointer.
+ SUBQ b_base+8(FP), CX
+ MOVQ CX, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
new file mode 100644
index 000000000..4a5a82160
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := prime1v + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -prime1v
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ i, end := 0, len(b)
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(b[i:i+8:len(b)]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for ; i < end; i++ {
+ h ^= uint64(b[i]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
new file mode 100644
index 000000000..6f3b0cb10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
@@ -0,0 +1,11 @@
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 000000000..cef69e35b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,409 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+type seq struct {
+ litLen uint32
+ matchLen uint32
+ offset uint32
+
+ // Codes are stored here for the encoder
+ // so they only have to be looked up once.
+ llCode, mlCode, ofCode uint8
+}
+
+func (s seq) String() string {
+ if s.offset <= 3 {
+ if s.offset == 0 {
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+ }
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+ }
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
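+// seqCompMode is the symbol compression mode used for a sequence table,
+// as defined by the Zstandard format specification.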
+type seqCompMode uint8
+
+const (
+ compModePredefined seqCompMode = iota
+ compModeRLE
+ compModeFSE
+ compModeRepeat
+)
+
+type sequenceDec struct {
+ // decoder keeps track of the current state and updates it from the bitstream.
+ fse *fseDecoder
+ state fseState
+ repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+ if s.fse == nil {
+ return errors.New("sequence decoder not defined")
+ }
+ s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+ return nil
+}
+
+// sequenceDecs contains all 3 sequence decoders and their state.
+type sequenceDecs struct {
+ litLengths sequenceDec
+ offsets sequenceDec
+ matchLengths sequenceDec
+ prevOffset [3]int
+ hist []byte
+ literals []byte
+ out []byte
+ maxBits uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+ if err := s.litLengths.init(br); err != nil {
+ return errors.New("litLengths:" + err.Error())
+ }
+ if err := s.offsets.init(br); err != nil {
+ return errors.New("offsets:" + err.Error())
+ }
+ if err := s.matchLengths.init(br); err != nil {
+ return errors.New("matchLengths:" + err.Error())
+ }
+ s.literals = literals
+ s.hist = hist.b
+ s.prevOffset = hist.recentOffsets
+ s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+ s.out = out
+ return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+ startSize := len(s.out)
+ for i := seqs - 1; i >= 0; i-- {
+ if br.overread() {
+ printf("reading sequence %d, exceeded available data\n", seqs-i)
+ return io.ErrUnexpectedEOF
+ }
+ var litLen, matchOff, matchLen int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ litLen, matchOff, matchLen = s.nextFast(br)
+ br.fillFast()
+ } else {
+ litLen, matchOff, matchLen = s.next(br)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", seqs-i-1, "Litlen:", litLen, "matchOff:", matchOff, "(abs) matchLen:", matchLen)
+ }
+
+ if litLen > len(s.literals) {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", litLen, len(s.literals))
+ }
+ size := litLen + matchLen + len(s.out)
+ if size-startSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size", size)
+ }
+ if size > cap(s.out) {
+			// Not enough capacity; this is triggered extremely rarely,
+			// but can happen if the destination slice is too small for sync operations.
+			// We add maxBlockSize to the capacity.
+ s.out = append(s.out, make([]byte, maxBlockSize)...)
+ s.out = s.out[:len(s.out)-maxBlockSize]
+ }
+ if matchLen > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", matchLen)
+ }
+ if matchOff > len(s.out)+len(hist)+litLen {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen)
+ }
+ if matchOff == 0 && matchLen > 0 {
+ return fmt.Errorf("zero matchoff and matchlen > 0")
+ }
+
+ s.out = append(s.out, s.literals[:litLen]...)
+ s.literals = s.literals[litLen:]
+ out := s.out
+
+ // Copy from history.
+ // TODO: Blocks without history could be made to ignore this completely.
+ if v := matchOff - len(s.out); v > 0 {
+ // v is the start position in history from end.
+ start := len(s.hist) - v
+ if matchLen > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ out = append(out, s.hist[start:]...)
+ matchOff -= v
+ matchLen -= v
+ } else {
+ out = append(out, s.hist[start:start+matchLen]...)
+ matchLen = 0
+ }
+ }
+ // We must be in current buffer now
+ if matchLen > 0 {
+ start := len(s.out) - matchOff
+ if matchLen <= len(s.out)-start {
+ // No overlap
+ out = append(out, s.out[start:start+matchLen]...)
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at the time.
+ out = out[:len(out)+matchLen]
+ src := out[start : start+matchLen]
+ // Destination is the space we just added.
+ dst := out[len(out)-matchLen:]
+ dst = dst[:len(src)]
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ s.out = out
+ if i == 0 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+ if true {
+			// Manually inlined version of updateAlt below.
+			// Updates all 3 states at once; approx. 5-20% faster.
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+ nBits := a.nbBits + b.nbBits + c.nbBits
+ if nBits == 0 {
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState]
+ s.offsets.state.state = s.offsets.state.dt[c.newState]
+ } else {
+ bits := br.getBitsFast(nBits)
+ lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31))
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits]
+
+ lowBits = uint16(bits >> (c.nbBits & 31))
+ lowBits &= bitMask[b.nbBits&15]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits]
+
+ lowBits = uint16(bits) & bitMask[c.nbBits&15]
+ s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits]
+ }
+ } else {
+ s.updateAlt(br)
+ }
+ }
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ return nil
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) update(br *bitReader) {
+ // Max 8 bits
+ s.litLengths.state.next(br)
+ // Max 9 bits
+ s.matchLengths.state.next(br)
+ // Max 8 bits
+ s.offsets.state.next(br)
+}
+
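+// bitMask[i] is a mask with the lowest i bits set.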
+var bitMask [16]uint16
+
+func init() {
+ for i := range bitMask[:] {
+ bitMask[i] = uint16((1 << uint(i)) - 1)
+ }
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) updateAlt(br *bitReader) {
+ // Update all 3 states at once. Approx 20% faster.
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+ nBits := a.nbBits + b.nbBits + c.nbBits
+ if nBits == 0 {
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState]
+ s.offsets.state.state = s.offsets.state.dt[c.newState]
+ return
+ }
+ bits := br.getBitsFast(nBits)
+ lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31))
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits]
+
+ lowBits = uint16(bits >> (c.nbBits & 31))
+ lowBits &= bitMask[b.nbBits&15]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits]
+
+ lowBits = uint16(bits) & bitMask[c.nbBits&15]
+ s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits]
+}
+
+// nextFast decodes the next values and updates the states. It may only be used when at least 4 unused bytes will be left on the stream when done.
+func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) {
+ // Final will not read from stream.
+ ll, llB := s.litLengths.state.final()
+ ml, mlB := s.matchLengths.state.final()
+ mo, moB := s.offsets.state.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ if s.maxBits <= 32 {
+ mo += br.getBits(moB)
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+ } else {
+ mo += br.getBits(moB)
+ br.fillFast()
+ // matchlength+literal length, max 32 bits
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+ }
+
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ return
+ }
+
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ return
+ }
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ return
+}
+
+func (s *sequenceDecs) next(br *bitReader) (ll, mo, ml int) {
+ // Final will not read from stream.
+ ll, llB := s.litLengths.state.final()
+ ml, mlB := s.matchLengths.state.final()
+ mo, moB := s.offsets.state.final()
+
+ // extra bits are stored in reverse order.
+ br.fill()
+ if s.maxBits <= 32 {
+ mo += br.getBits(moB)
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+ } else {
+ mo += br.getBits(moB)
+ br.fill()
+ // matchlength+literal length, max 32 bits
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ }
+ mo = s.adjustOffset(mo, ll, moB)
+ return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+ if offsetB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = offset
+ return offset
+ }
+
+ if litLen == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ offset++
+ }
+
+ if offset == 0 {
+ return s.prevOffset[0]
+ }
+ var temp int
+ if offset == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[offset]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("temp was 0")
+ temp = 1
+ }
+
+ if offset != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ return temp
+}
+
+// mergeHistory merges the sequence decoders in s into hist, keeping the existing history decoders where a repeat is requested.
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
+ for i := uint(0); i < 3; i++ {
+ var sNew, sHist *sequenceDec
+ switch i {
+ default:
+ // same as "case 0":
+ sNew = &s.litLengths
+ sHist = &hist.litLengths
+ case 1:
+ sNew = &s.offsets
+ sHist = &hist.offsets
+ case 2:
+ sNew = &s.matchLengths
+ sHist = &hist.matchLengths
+ }
+ if sNew.repeat {
+ if sHist.fse == nil {
+ return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
+ }
+ continue
+ }
+ if sNew.fse == nil {
+ return nil, fmt.Errorf("sequence stream %d, no fse found", i)
+ }
+ if sHist.fse != nil && !sHist.fse.preDefined {
+ fseDecoderPool.Put(sHist.fse)
+ }
+ sHist.fse = sNew.fse
+ }
+ return hist, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 000000000..36bcc3cc0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,115 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+ llEnc, ofEnc, mlEnc *fseEncoder
+ llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+ *s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+ compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one; move current to history and reuse the previous history.
+ if *current == used {
+ *prev, *current = *current, *prev
+ c := *current
+ p := *prev
+ c.reUsed = false
+ p.reUsed = true
+ return
+ }
+ if used == *prev {
+ return
+ }
+ // Ensure we cannot reuse by accident
+ prevEnc := *prev
+ prevEnc.symbolLen = 0
+ }
+ compareSwap(ll, &s.llEnc, &s.llPrev)
+ compareSwap(ml, &s.mlEnc, &s.mlPrev)
+ compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
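+// highBit returns the bit position of the highest set bit of val.
+// val is assumed to be > 0.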
+func highBit(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
+func llCode(litLength uint32) uint8 {
+ const llDeltaCode = 19
+ if litLength <= 63 {
+ // Compiler insists on bounds check (Go 1.12)
+ return llCodeTable[litLength&63]
+ }
+ return uint8(highBit(litLength)) + llDeltaCode
+}
+
+var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
+
+// Up to 6 bits
+const maxMLCode = 52
+
+// mlBitsTable translates from ml code to number of bits.
+var mlBitsTable = [maxMLCode + 1]byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16}
+
+// mlCode returns the code for the given mlBase.
+// Note: mlBase = matchLength - MINMATCH,
+// because that is the form stored in the sequence store.
+func mlCode(mlBase uint32) uint8 {
+ const mlDeltaCode = 36
+ if mlBase <= 127 {
+ // Compiler insists on bounds check (Go 1.12)
+ return mlCodeTable[mlBase&127]
+ }
+ return uint8(highBit(mlBase)) + mlDeltaCode
+}
+
+func ofCode(offset uint32) uint8 {
+ // A valid offset will always be > 0.
+ return uint8(bits.Len32(offset) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
new file mode 100644
index 000000000..e9e518570
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -0,0 +1,435 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "io"
+
+ "github.com/klauspost/compress/huff0"
+ "github.com/klauspost/compress/snappy"
+)
+
+const (
+ snappyTagLiteral = 0x00
+ snappyTagCopy1 = 0x01
+ snappyTagCopy2 = 0x02
+ snappyTagCopy4 = 0x03
+)
+
+const (
+ snappyChecksumSize = 4
+ snappyMagicBody = "sNaPpY"
+
+ // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ snappyMaxBlockSize = 65536
+
+ // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ snappyMaxEncodedLenOfMaxBlockSize = 76490
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var (
+ // ErrSnappyCorrupt reports that the input is invalid.
+ ErrSnappyCorrupt = errors.New("snappy: corrupt input")
+ // ErrSnappyTooLarge reports that the uncompressed length is too large.
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrSnappyUnsupported reports that the input isn't supported.
+ ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what can be done by a full decompression
+// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+// any errors being generated.
+// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
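+//
+// A minimal usage sketch (in and out are placeholder io.Reader and io.Writer
+// values; error handling elided):
+//
+//	var c SnappyConverter
+//	n, err := c.Convert(in, out) // n is the number of bytes written to out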
+type SnappyConverter struct {
+ r io.Reader
+ err error
+ buf []byte
+ block *blockEnc
+}
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+ r.err = nil
+ r.r = in
+ if r.block == nil {
+ r.block = &blockEnc{}
+ r.block.init()
+ }
+ r.block.initNewEncode()
+ if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+ r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+ }
+ r.block.litEnc.Reuse = huff0.ReusePolicyNone
+ var written int64
+ var readHeader bool
+ {
+ var header []byte
+ var n int
+ header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+ n, r.err = w.Write(header)
+ if r.err != nil {
+ return written, r.err
+ }
+ written += int64(n)
+ }
+
+ for {
+ if !r.readFull(r.buf[:4], true) {
+ // Add empty last block
+ r.block.reset(nil)
+ r.block.last = true
+ err := r.block.encodeLits()
+ if err != nil {
+ return written, err
+ }
+ n, err := w.Write(r.block.output)
+ if err != nil {
+ return written, err
+ }
+ written += int64(n)
+
+ return written, r.err
+ }
+ chunkType := r.buf[0]
+ if !readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ println("chunkType != chunkTypeStreamIdentifier", chunkType)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ println("chunkLen > len(r.buf)", chunkType)
+ r.err = ErrSnappyUnsupported
+ return written, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < snappyChecksumSize {
+ println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return written, r.err
+ }
+ //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[snappyChecksumSize:]
+
+ n, hdr, err := snappyDecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return written, r.err
+ }
+ buf = buf[hdr:]
+ if n > snappyMaxBlockSize {
+ println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.reset(nil)
+ r.block.pushOffsets()
+ if err := decodeSnappy(r.block, buf); err != nil {
+ r.err = err
+ return written, r.err
+ }
+ if r.block.size+r.block.extraLits != n {
+ printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ err = r.block.encode()
+ switch err {
+ case errIncompressible:
+ r.block.popOffsets()
+ r.block.reset(nil)
+ r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
+ if err != nil {
+ println("snappy.Decode:", err)
+ return written, err
+ }
+ err = r.block.encodeLits()
+ if err != nil {
+ return written, err
+ }
+ case nil:
+ default:
+ return written, err
+ }
+
+ n, r.err = w.Write(r.block.output)
+ if r.err != nil {
+				return written, r.err
+ }
+ written += int64(n)
+ continue
+ case chunkTypeUncompressedData:
+ if debug {
+ println("Uncompressed, chunklen", chunkLen)
+ }
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < snappyChecksumSize {
+ println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.reset(nil)
+ buf := r.buf[:snappyChecksumSize]
+ if !r.readFull(buf, false) {
+ return written, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.block.literals instead of via r.buf.
+ n := chunkLen - snappyChecksumSize
+ if n > snappyMaxBlockSize {
+ println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.literals = r.block.literals[:n]
+ if !r.readFull(r.block.literals, false) {
+ return written, r.err
+ }
+ if snappyCRC(r.block.literals) != checksum {
+ println("literals crc mismatch")
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ err := r.block.encodeLits()
+ if err != nil {
+ return written, err
+ }
+ n, r.err = w.Write(r.block.output)
+ if r.err != nil {
+ return written, r.err
+ }
+ written += int64(n)
+ continue
+
+ case chunkTypeStreamIdentifier:
+ if debug {
+ println("stream id", chunkLen, len(snappyMagicBody))
+ }
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(snappyMagicBody) {
+ println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
+ return written, r.err
+ }
+ for i := 0; i < len(snappyMagicBody); i++ {
+ if r.buf[i] != snappyMagicBody[i] {
+ println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ println("chunkType <= 0x7f")
+ r.err = ErrSnappyUnsupported
+ return written, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return written, r.err
+ }
+ }
+}
+
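+// parseSnappyChunkHeader is an illustrative sketch, not part of the upstream
+// source: it shows the layout of the 4-byte chunk header read at the top of
+// the conversion loop above, a chunk-type byte followed by a 24-bit
+// little-endian length of the chunk body.
+func parseSnappyChunkHeader(hdr [4]byte) (chunkType byte, chunkLen int) {
+ chunkType = hdr[0] // 0x00 compressed, 0x01 uncompressed, 0xff stream identifier
+ chunkLen = int(hdr[1]) | int(hdr[2])<<8 | int(hdr[3])<<16 // body length, checksum included
+ return chunkType, chunkLen
+}
+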
+// decodeSnappy appends the sequences and literals decoded from src to blk.
+// It assumes that the varint-encoded length of the decompressed bytes has
+// already been read.
+func decodeSnappy(blk *blockEnc, src []byte) error {
+ //decodeRef(make([]byte, snappyMaxBlockSize), src)
+ var s, length int
+ lits := blk.extraLits
+ var offset uint32
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case snappyTagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ if x > snappyMaxBlockSize {
+ println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
+ return ErrSnappyCorrupt
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ println("length <= 0 ", length)
+
+ return errUnsupportedLiteralLength
+ }
+ if s+length > len(src) {
+ println("s+length > len(src)", s, length, len(src))
+ return ErrSnappyCorrupt
+ }
+
+ blk.literals = append(blk.literals, src[s:s+length]...)
+ //println(length, "litLen")
+ lits += length
+ s += length
+ continue
+
+ case snappyTagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
+
+ case snappyTagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = uint32(src[s-2]) | uint32(src[s-1])<<8
+
+ case snappyTagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+
+ if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
+ println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
+
+ return ErrSnappyCorrupt
+ }
+
+ // Matching the offset against the recent (repeat) offsets would adjust
+ // the output offset and gain a tiny bit of compression, typically around
+ // 1%, but is currently disabled:
+ //offset = blk.matchOffset(offset, uint32(lits))
+ // Instead, store the raw offset biased by 3 to step over the three zstd
+ // repeat-offset codes.
+ offset += 3
+
+ blk.sequences = append(blk.sequences, seq{
+ litLen: uint32(lits),
+ offset: offset,
+ matchLen: uint32(length) - zstdMinMatch,
+ })
+ blk.size += length + lits
+ lits = 0
+ }
+ blk.extraLits = lits
+ return nil
+}
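+
+// snappyTagDescription is an illustrative sketch, not part of the upstream
+// source: it summarizes the element encoding decodeSnappy parses above. The
+// low two bits of the first byte of each element select the form, per
+// https://github.com/google/snappy/blob/master/format_description.txt.
+func snappyTagDescription(tag byte) string {
+ switch tag & 0x03 {
+ case snappyTagLiteral:
+ return "literal: upper 6 bits hold length-1, or values 60-63 select 1-4 extra length bytes"
+ case snappyTagCopy1:
+ return "copy, 1-byte offset: 3-bit length (4-11), 11-bit offset"
+ case snappyTagCopy2:
+ return "copy, 2-byte offset: 6-bit length (1-64), 16-bit offset"
+ }
+ return "copy, 4-byte offset (snappyTagCopy4): 6-bit length (1-64), 32-bit offset"
+}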
+
+func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrSnappyCorrupt
+ }
+ return false
+ }
+ return true
+}
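+
+// Note on allowEOF (editorial): the 4-byte chunk-header read in the main
+// conversion loop passes allowEOF=true, since running out of input between
+// chunks is a valid end of stream, while reads inside a chunk pass
+// allowEOF=false so truncated input surfaces as ErrSnappyCorrupt.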
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// snappyCRC implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func snappyCRC(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
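+
+// snappyChecksumOK is an illustrative sketch, not part of the upstream
+// source: per the framing format, each data chunk body starts with the
+// masked CRC of its uncompressed bytes as a little-endian uint32, so a
+// chunk would be verified like this.
+func snappyChecksumOK(body, decoded []byte) bool {
+ if len(body) < snappyChecksumSize {
+ return false
+ }
+ stored := uint32(body[0]) | uint32(body[1])<<8 | uint32(body[2])<<16 | uint32(body[3])<<24
+ return snappyCRC(decoded) == stored
+}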
+
+// snappyDecodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrSnappyCorrupt
+ }
+
+ // wordSize is 32 or 64, matching the platform's native uint width.
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrSnappyTooLarge
+ }
+ return int(v), n, nil
+}
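+
+// Worked example (editorial, not from the upstream source):
+//
+//	blockLen, headerLen, err := snappyDecodedLen([]byte{0xAC, 0x02})
+//	// blockLen == 300, headerLen == 2, err == nil
+//
+// since 0xAC 0x02 is the uvarint encoding of 300.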
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
new file mode 100644
index 000000000..b975954c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -0,0 +1,135 @@
+// Package zstd provides compression and decompression of Zstandard data.
+//
+// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
+package zstd
+
+import (
+ "errors"
+ "log"
+ "math/bits"
+)
+
+const debug = false
+const debugSequences = false
+
+// forcePreDef forces the encoder to use predefined tables.
+const forcePreDef = false
+
+// zstdMinMatch is the minimum zstd match length.
+const zstdMinMatch = 3
+
+var (
+ // ErrReservedBlockType is returned when a reserved block type is found.
+ // Typically this indicates wrong or corrupted input.
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+ // ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+ // Typically this indicates wrong or corrupted input.
+ ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+ // ErrBlockTooSmall is returned when a block is too small to be decoded.
+ // Typically returned on invalid input.
+ ErrBlockTooSmall = errors.New("block too small")
+
+ // ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+ // Typically this indicates wrong or corrupted input.
+ ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+ // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+ // Typically this indicates wrong or corrupted input.
+ ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+ // ErrWindowSizeTooSmall is returned when no window size is specified.
+ // Typically this indicates wrong or corrupted input.
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+ // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+ ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+ // ErrUnknownDictionary is returned if the dictionary ID is unknown.
+ // For the time being dictionaries are not supported.
+ ErrUnknownDictionary = errors.New("unknown dictionary")
+
+ // ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+ // ErrCRCMismatch is returned if CRC mismatches.
+ ErrCRCMismatch = errors.New("CRC check failed")
+
+ // ErrDecoderClosed will be returned if the Decoder was used after
+ // Close has been called.
+ ErrDecoderClosed = errors.New("decoder used after Close")
+)
+
+func println(a ...interface{}) {
+ if debug {
+ log.Println(a...)
+ }
+}
+
+func printf(format string, a ...interface{}) {
+ if debug {
+ log.Printf(format, a...)
+ }
+}
+
+// matchLen returns how many leading bytes of a and b are equal.
+// a must be the shorter of the two slices.
+func matchLen(a, b []byte) int {
+ b = b[:len(a)]
+ for i := 0; i < len(a)-7; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ return i + (bits.TrailingZeros64(diff) >> 3)
+ }
+ }
+ checked := (len(a) >> 3) << 3
+ a = a[checked:]
+ b = b[checked:]
+ // TODO: We could compare 4 bytes at a time here before falling back to single bytes.
+ for i := range a {
+ if a[i] != b[i] {
+ return i + checked
+ }
+ }
+ return len(a) + checked
+}
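+
+// Worked example (editorial, not from the upstream source) of the 8-byte
+// fast path above:
+//
+//	a := []byte{1, 2, 3, 0, 0, 0, 0, 0}
+//	b := []byte{1, 2, 9, 0, 0, 0, 0, 0}
+//
+// load64(a, 0) ^ load64(b, 0) == 0x0a0000, whose TrailingZeros64 is 17, and
+// 17>>3 == 2, so matchLen(a, b) == 2: the index of the first mismatching byte.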
+
+// matchLenIn returns the length of the match in src between indexes s and t.
+func matchLenIn(src []byte, s, t int32) int32 {
+ a := src[s:]
+ b := src[t:]
+ b = b[:len(a)]
+ // Extend the match to be as long as possible.
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i)
+ }
+ }
+ return int32(len(a))
+}
+
+func load3232(b []byte, i int32) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func load64(b []byte, i int) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
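+
+// Editorial note, not from the upstream source: load64(b, i) computes the
+// same value as the stdlib form
+//
+//	binary.LittleEndian.Uint64(b[i : i+8])
+//
+// The explicit re-slice to a constant length helps the compiler elide
+// bounds checks so the byte loads can fuse into a single read on
+// little-endian targets.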