author    Valentin Rothberg <rothberg@redhat.com>  2019-06-24 11:29:13 +0200
committer Valentin Rothberg <rothberg@redhat.com>  2019-06-24 13:20:59 +0200
commit    d697456dc90adbaf68224ed7c115b38d5855e582 (patch)
tree      5fd88c48b34e7bead0028fa97e39f43f03880642 /vendor/github.com/klauspost
parent    a3211b73c62a9fcc13f09305bf629ef507b26d34 (diff)
migrate to go-modules
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Diffstat (limited to 'vendor/github.com/klauspost')
-rw-r--r--  vendor/github.com/klauspost/compress/README.md       160
-rw-r--r--  vendor/github.com/klauspost/compress/flate/gen.go     265
-rw-r--r--  vendor/github.com/klauspost/cpuid/.gitignore            24
-rw-r--r--  vendor/github.com/klauspost/cpuid/.travis.yml           23
-rw-r--r--  vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt      35
-rw-r--r--  vendor/github.com/klauspost/cpuid/private-gen.go       476
-rw-r--r--  vendor/github.com/klauspost/pgzip/.gitignore             24
-rw-r--r--  vendor/github.com/klauspost/pgzip/.travis.yml            21
8 files changed, 868 insertions(+), 160 deletions(-)
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
deleted file mode 100644
index 280d54890..000000000
--- a/vendor/github.com/klauspost/compress/README.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# compress
-
-This package is based on an optimized Deflate function, which is used by the gzip/zip/zlib packages.
-
-It offers slightly better compression at lower compression settings, and up to 3x faster encoding at highest compression level.
-
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
-[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
-[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
-
-# changelog
-
-* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
-* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
-* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
-* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
-* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
-* May 28, 2017: Reduce allocations when resetting decoder.
-* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
-* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
-* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
-* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
-* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
-* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
-* Mar 24, 2016: Small speedup for level 1-3.
-* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
-* Feb 19, 2016: Handle small payloads faster in level 1-3.
-* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
-* Feb 14, 2016: Snappy: Merge upstream changes.
-* Feb 14, 2016: Snappy: Fix aggressive skipping.
-* Feb 14, 2016: Snappy: Update benchmark.
-* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
-* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%.
-* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
-* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
-* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
-* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
-* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
-* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
-* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
-* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
-* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
-* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
-* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
-* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
-
-# usage
-
-The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-
-| old import | new import |
-|--------------------|-----------------------------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` |
-| `compress/zlib` | `github.com/klauspost/compress/zlib` |
-| `archive/zip` | `github.com/klauspost/compress/zip` |
-| `compress/flate` | `github.com/klauspost/compress/flate` |
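-
-A minimal sketch of the swap for gzip: since the packages mirror the standard library API, only the import path changes (the file name and payload below are placeholders):
-
-```go
-package main
-
-import (
-	"log"
-	"os"
-
-	"github.com/klauspost/compress/gzip" // was: "compress/gzip"
-)
-
-func main() {
-	f, err := os.Create("out.gz")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer f.Close()
-
-	// Same calls and level constants as the standard library.
-	w, err := gzip.NewWriterLevel(f, gzip.BestSpeed)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer w.Close()
-
-	if _, err := w.Write([]byte("hello, compressed world\n")); err != nil {
-		log.Fatal(err)
-	}
-}
-```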
-
-You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
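-
-A sketch of the multithreaded path via pgzip: the writer is again gzip-compatible, and the `SetConcurrency(blockSize, blocks)` tuning call used below is an assumption to verify against the pgzip documentation:
-
-```go
-package main
-
-import (
-	"io"
-	"log"
-	"os"
-
-	"github.com/klauspost/pgzip"
-)
-
-func main() {
-	in, err := os.Open("backup.tar") // placeholder input file
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer in.Close()
-
-	out, err := os.Create("backup.tar.gz")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer out.Close()
-
-	w := pgzip.NewWriter(out)
-	// 1 MB blocks, up to 8 compressed in parallel (assumed tuning knob).
-	if err := w.SetConcurrency(1<<20, 8); err != nil {
-		log.Fatal(err)
-	}
-	defer w.Close()
-
-	if _, err := io.Copy(w, in); err != nil {
-		log.Fatal(err)
-	}
-}
-```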
-
-The packages contain the same functionality as the standard library, so you can use its godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
-
-Currently there is only a minor speedup on decompression (mostly from CRC32 calculation).
-
-# Performance Update 2018
-
-It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would redo my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates, I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
-
-`gzstd` (standard library gzip) and `gzkp` (this package's gzip) each use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide that smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" gives the best compression, and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 show no significant gains - often spending far more time than the achieved compression can justify.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, a mixture of (mostly) HTML, JS, and CSS.
-
-Since levels 1 and 9 are close to the same code in both packages, their results are quite close. But looking at the levels in between, the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
-
-The standard library seems suboptimal at levels 3 and 4 - offering both worse compression and lower speed than levels 6 & 7 of this package, respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-# linear time compression (huffman only)
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, only reducing the number of bits used to represent each character.
-
-This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
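-
-A minimal sketch of selecting the mode through this package's `flate` writer (per the note above, the standard library's flate exposes the same constant from Go 1.7, but its gzip does not):
-
-```go
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-
-	"github.com/klauspost/compress/flate"
-)
-
-func main() {
-	var buf bytes.Buffer
-
-	// HuffmanOnly disables match searching entirely; only the bit
-	// representation of each byte is shortened.
-	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	data := bytes.Repeat([]byte("the quick brown fox "), 1000)
-	if _, err := w.Write(data); err != nil {
-		log.Fatal(err)
-	}
-	if err := w.Close(); err != nil {
-		log.Fatal(err)
-	}
-
-	fmt.Printf("in: %d bytes, out: %d bytes\n", len(data), buf.Len())
-}
-```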
-
-
-# snappy package
-
-The standard snappy package has now been improved. This repo contains a copy of the snappy repo.
-
-I would advise using the standard package: https://github.com/golang/snappy
-
-
-# license
-
-This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go
new file mode 100644
index 000000000..154c89a48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/gen.go
@@ -0,0 +1,265 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates fixedhuff.go
+// Invoke as
+//
+// go run gen.go -output fixedhuff.go
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+var filename = flag.String("output", "fixedhuff.go", "output file name")
+
+const maxCodeLen = 16
+
+// Note: the definition of the huffmanDecoder struct is copied from
+// inflate.go, as it is private to the implementation.
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+ min int // the minimum code length
+ chunks [huffmanNumChunks]uint32 // chunks as described above
+ links [][]uint32 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(bits []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.min != 0 {
+ *h = huffmanDecoder{}
+ }
+
+ // Count number of codes of each length,
+ // compute min and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range bits {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i] = code
+ code += count[i]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.min = min
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ h.links = make([][]uint32, huffmanNumChunks-link)
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.links[off] = make([]uint32, numLinks)
+ }
+ }
+
+ for i, n := range bits {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint32(i<<huffmanValueShift | n)
+ reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+func main() {
+ flag.Parse()
+
+ var h huffmanDecoder
+ var bits [288]int
+ initReverseByte()
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ h.init(bits[:])
+ if h.links != nil {
+ log.Fatal("Unexpected links table in fixed Huffman decoder")
+ }
+
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.`+"\n\n")
+
+ fmt.Fprintln(&buf, "package flate")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
+ fmt.Fprintf(&buf, "\t%d,\n", h.min)
+ fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
+ for i := 0; i < huffmanNumChunks; i++ {
+ if i&7 == 0 {
+ fmt.Fprintf(&buf, "\t\t")
+ } else {
+ fmt.Fprintf(&buf, " ")
+ }
+ fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
+ if i&7 == 7 {
+ fmt.Fprintln(&buf)
+ }
+ }
+ fmt.Fprintln(&buf, "\t},")
+ fmt.Fprintln(&buf, "\tnil, 0,")
+ fmt.Fprintln(&buf, "}")
+
+ data, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = ioutil.WriteFile(*filename, data, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+var reverseByte [256]byte
+
+func initReverseByte() {
+ for x := 0; x < 256; x++ {
+ var result byte
+ for i := uint(0); i < 8; i++ {
+ result |= byte(((x >> i) & 1) << (7 - i))
+ }
+ reverseByte[x] = result
+ }
+}
diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml
new file mode 100644
index 000000000..630192d59
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+
+sudo: false
+
+os:
+ - linux
+ - osx
+go:
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - master
+
+script:
+ - go vet ./...
+ - go test -v ./...
+ - go test -race ./...
+ - diff <(gofmt -d .) <("")
+
+matrix:
+ allow_failures:
+ - go: 'master'
+ fast_finish: true
diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
new file mode 100644
index 000000000..2ef4714f7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
@@ -0,0 +1,35 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2015- Klaus Post & Contributors.
+Email: klauspost@gmail.com
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go
new file mode 100644
index 000000000..437333d29
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/private-gen.go
@@ -0,0 +1,476 @@
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var inFiles = []string{"cpuid.go", "cpuid_test.go"}
+var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
+var fileSet = token.NewFileSet()
+var reWrites = []rewrite{
+ initRewrite("CPUInfo -> cpuInfo"),
+ initRewrite("Vendor -> vendor"),
+ initRewrite("Flags -> flags"),
+ initRewrite("Detect -> detect"),
+ initRewrite("CPU -> cpu"),
+}
+var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
+ // cpuid_test.go
+ "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
+}
+
+var excludePrefixes = []string{"test", "benchmark"}
+
+func main() {
+ Package := "private"
+ parserMode := parser.ParseComments
+ exported := make(map[string]rewrite)
+ for _, file := range inFiles {
+ in, err := os.Open(file)
+ if err != nil {
+ log.Fatalf("opening input", err)
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ log.Fatalf("reading input", err)
+ }
+
+ astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
+ if err != nil {
+ log.Fatalf("parsing input", err)
+ }
+
+ for _, rw := range reWrites {
+ astfile = rw(astfile)
+ }
+
+ // Inspect the AST and print all identifiers and literals.
+ var startDecl token.Pos
+ var endDecl token.Pos
+ ast.Inspect(astfile, func(n ast.Node) bool {
+ var s string
+ switch x := n.(type) {
+ case *ast.Ident:
+ if x.IsExported() {
+ t := strings.ToLower(x.Name)
+ for _, pre := range excludePrefixes {
+ if strings.HasPrefix(t, pre) {
+ return true
+ }
+ }
+ if !excludeNames[t] {
+ //if x.Pos() > startDecl && x.Pos() < endDecl {
+ exported[x.Name] = initRewrite(x.Name + " -> " + t)
+ }
+ }
+
+ case *ast.GenDecl:
+ if x.Tok == token.CONST && x.Lparen > 0 {
+ startDecl = x.Lparen
+ endDecl = x.Rparen
+ // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
+ }
+ }
+ if s != "" {
+ fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
+ }
+ return true
+ })
+
+ for _, rw := range exported {
+ astfile = rw(astfile)
+ }
+
+ var buf bytes.Buffer
+
+ printer.Fprint(&buf, fileSet, astfile)
+
+ // Remove package documentation and insert information
+ s := buf.String()
+ ind := strings.Index(buf.String(), "\npackage cpuid")
+ s = s[ind:]
+ s = "// Generated, DO NOT EDIT,\n" +
+ "// but copy it to your own project and rename the package.\n" +
+ "// See more at http://github.com/klauspost/cpuid\n" +
+ s
+
+ outputName := Package + string(os.PathSeparator) + file
+
+ err = ioutil.WriteFile(outputName, []byte(s), 0644)
+ if err != nil {
+ log.Fatalf("writing output: %s", err)
+ }
+ log.Println("Generated", outputName)
+ }
+
+ for _, file := range copyFiles {
+ dst := ""
+ if strings.HasPrefix(file, "cpuid") {
+ dst = Package + string(os.PathSeparator) + file
+ } else {
+ dst = Package + string(os.PathSeparator) + "cpuid_" + file
+ }
+ err := copyFile(file, dst)
+ if err != nil {
+ log.Fatalf("copying file: %s", err)
+ }
+ log.Println("Copied", dst)
+ }
+}
+
+// copyFile copies a file from src to dst. If the src and dst files exist and are
+// the same, it returns success. Otherwise it copies the file contents from src to dst.
+func copyFile(src, dst string) (err error) {
+ sfi, err := os.Stat(src)
+ if err != nil {
+ return
+ }
+ if !sfi.Mode().IsRegular() {
+ // cannot copy non-regular files (e.g., directories,
+ // symlinks, devices, etc.)
+ return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
+ }
+ dfi, err := os.Stat(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return
+ }
+ } else {
+ if !(dfi.Mode().IsRegular()) {
+ return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
+ }
+ if os.SameFile(sfi, dfi) {
+ return
+ }
+ }
+ err = copyFileContents(src, dst)
+ return
+}
+
+// copyFileContents copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file.
+func copyFileContents(src, dst string) (err error) {
+ in, err := os.Open(src)
+ if err != nil {
+ return
+ }
+ defer in.Close()
+ out, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+ defer func() {
+ cerr := out.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+ if _, err = io.Copy(out, in); err != nil {
+ return
+ }
+ err = out.Sync()
+ return
+}
+
+type rewrite func(*ast.File) *ast.File
+
+// Mostly copied from gofmt
+func initRewrite(rewriteRule string) rewrite {
+ f := strings.Split(rewriteRule, "->")
+ if len(f) != 2 {
+ fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
+ os.Exit(2)
+ }
+ pattern := parseExpr(f[0], "pattern")
+ replace := parseExpr(f[1], "replacement")
+ return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
+}
+
+// parseExpr parses s as an expression.
+// It might make sense to expand this to allow statement patterns,
+// but there are problems with preserving formatting and also
+// with what a wildcard for a statement looks like.
+func parseExpr(s, what string) ast.Expr {
+ x, err := parser.ParseExpr(s)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
+ os.Exit(2)
+ }
+ return x
+}
+
+// Keep this function for debugging.
+/*
+func dump(msg string, val reflect.Value) {
+ fmt.Printf("%s:\n", msg)
+ ast.Print(fileSet, val.Interface())
+ fmt.Println()
+}
+*/
+
+// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
+func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
+ cmap := ast.NewCommentMap(fileSet, p, p.Comments)
+ m := make(map[string]reflect.Value)
+ pat := reflect.ValueOf(pattern)
+ repl := reflect.ValueOf(replace)
+
+ var rewriteVal func(val reflect.Value) reflect.Value
+ rewriteVal = func(val reflect.Value) reflect.Value {
+ // don't bother if val is invalid to start with
+ if !val.IsValid() {
+ return reflect.Value{}
+ }
+ for k := range m {
+ delete(m, k)
+ }
+ val = apply(rewriteVal, val)
+ if match(m, pat, val) {
+ val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
+ }
+ return val
+ }
+
+ r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
+ r.Comments = cmap.Filter(r).Comments() // recreate comments list
+ return r
+}
+
+// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
+func set(x, y reflect.Value) {
+ // don't bother if x cannot be set or y is invalid
+ if !x.CanSet() || !y.IsValid() {
+ return
+ }
+ defer func() {
+ if x := recover(); x != nil {
+ if s, ok := x.(string); ok &&
+ (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
+ // x cannot be set to y - ignore this rewrite
+ return
+ }
+ panic(x)
+ }
+ }()
+ x.Set(y)
+}
+
+// Values/types for special cases.
+var (
+ objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
+ scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
+
+ identType = reflect.TypeOf((*ast.Ident)(nil))
+ objectPtrType = reflect.TypeOf((*ast.Object)(nil))
+ positionType = reflect.TypeOf(token.NoPos)
+ callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
+ scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
+)
+
+// apply replaces each AST field x in val with f(x), returning val.
+// To avoid extra conversions, f operates on the reflect.Value form.
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
+ if !val.IsValid() {
+ return reflect.Value{}
+ }
+
+ // *ast.Objects introduce cycles and are likely incorrect after
+ // rewrite; don't follow them but replace with nil instead
+ if val.Type() == objectPtrType {
+ return objectPtrNil
+ }
+
+ // similarly for scopes: they are likely incorrect after a rewrite;
+ // replace them with nil
+ if val.Type() == scopePtrType {
+ return scopePtrNil
+ }
+
+ switch v := reflect.Indirect(val); v.Kind() {
+ case reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ e := v.Index(i)
+ set(e, f(e))
+ }
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ e := v.Field(i)
+ set(e, f(e))
+ }
+ case reflect.Interface:
+ e := v.Elem()
+ set(v, f(e))
+ }
+ return val
+}
+
+func isWildcard(s string) bool {
+ rune, size := utf8.DecodeRuneInString(s)
+ return size == len(s) && unicode.IsLower(rune)
+}
+
+// match returns true if pattern matches val,
+// recording wildcard submatches in m.
+// If m == nil, match checks whether pattern == val.
+func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
+ // Wildcard matches any expression. If it appears multiple
+ // times in the pattern, it must match the same expression
+ // each time.
+ if m != nil && pattern.IsValid() && pattern.Type() == identType {
+ name := pattern.Interface().(*ast.Ident).Name
+ if isWildcard(name) && val.IsValid() {
+ // wildcards only match valid (non-nil) expressions.
+ if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
+ if old, ok := m[name]; ok {
+ return match(nil, old, val)
+ }
+ m[name] = val
+ return true
+ }
+ }
+ }
+
+ // Otherwise, pattern and val must match recursively.
+ if !pattern.IsValid() || !val.IsValid() {
+ return !pattern.IsValid() && !val.IsValid()
+ }
+ if pattern.Type() != val.Type() {
+ return false
+ }
+
+ // Special cases.
+ switch pattern.Type() {
+ case identType:
+ // For identifiers, only the names need to match
+ // (and none of the other *ast.Object information).
+ // This is a common case, handle it all here instead
+ // of recursing down any further via reflection.
+ p := pattern.Interface().(*ast.Ident)
+ v := val.Interface().(*ast.Ident)
+ return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
+ case objectPtrType, positionType:
+ // object pointers and token positions always match
+ return true
+ case callExprType:
+ // For calls, the Ellipsis fields (token.Position) must
+ // match since that is how f(x) and f(x...) are different.
+ // Check them here but fall through for the remaining fields.
+ p := pattern.Interface().(*ast.CallExpr)
+ v := val.Interface().(*ast.CallExpr)
+ if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
+ return false
+ }
+ }
+
+ p := reflect.Indirect(pattern)
+ v := reflect.Indirect(val)
+ if !p.IsValid() || !v.IsValid() {
+ return !p.IsValid() && !v.IsValid()
+ }
+
+ switch p.Kind() {
+ case reflect.Slice:
+ if p.Len() != v.Len() {
+ return false
+ }
+ for i := 0; i < p.Len(); i++ {
+ if !match(m, p.Index(i), v.Index(i)) {
+ return false
+ }
+ }
+ return true
+
+ case reflect.Struct:
+ for i := 0; i < p.NumField(); i++ {
+ if !match(m, p.Field(i), v.Field(i)) {
+ return false
+ }
+ }
+ return true
+
+ case reflect.Interface:
+ return match(m, p.Elem(), v.Elem())
+ }
+
+ // Handle token integers, etc.
+ return p.Interface() == v.Interface()
+}
+
+// subst returns a copy of pattern with values from m substituted in place
+// of wildcards and pos used as the position of tokens from the pattern.
+// if m == nil, subst returns a copy of pattern and doesn't change the line
+// number information.
+func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
+ if !pattern.IsValid() {
+ return reflect.Value{}
+ }
+
+ // Wildcard gets replaced with map value.
+ if m != nil && pattern.Type() == identType {
+ name := pattern.Interface().(*ast.Ident).Name
+ if isWildcard(name) {
+ if old, ok := m[name]; ok {
+ return subst(nil, old, reflect.Value{})
+ }
+ }
+ }
+
+ if pos.IsValid() && pattern.Type() == positionType {
+ // use new position only if old position was valid in the first place
+ if old := pattern.Interface().(token.Pos); !old.IsValid() {
+ return pattern
+ }
+ return pos
+ }
+
+ // Otherwise copy.
+ switch p := pattern; p.Kind() {
+ case reflect.Slice:
+ v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
+ for i := 0; i < p.Len(); i++ {
+ v.Index(i).Set(subst(m, p.Index(i), pos))
+ }
+ return v
+
+ case reflect.Struct:
+ v := reflect.New(p.Type()).Elem()
+ for i := 0; i < p.NumField(); i++ {
+ v.Field(i).Set(subst(m, p.Field(i), pos))
+ }
+ return v
+
+ case reflect.Ptr:
+ v := reflect.New(p.Type()).Elem()
+ if elem := p.Elem(); elem.IsValid() {
+ v.Set(subst(m, elem, pos).Addr())
+ }
+ return v
+
+ case reflect.Interface:
+ v := reflect.New(p.Type()).Elem()
+ if elem := p.Elem(); elem.IsValid() {
+ v.Set(subst(m, elem, pos))
+ }
+ return v
+ }
+
+ return pattern
+}
diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml
new file mode 100644
index 000000000..6e9fca0ba
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+
+sudo: false
+
+os:
+ - linux
+ - osx
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - master
+
+script:
+ - go test -v -cpu=1,2,4 .
+ - go test -v -cpu=2 -race -short .
+
+matrix:
+ allow_failures:
+ - go: 'master'
+ fast_finish: true