author    Daniel J Walsh <dwalsh@redhat.com>  2021-09-29 15:24:31 -0400
committer Daniel J Walsh <dwalsh@redhat.com>  2021-09-29 15:25:54 -0400
commit    1805ed360d41e5b4ed90e9493e59a2129608dcd5 (patch)
tree      d77b405b70c866af963de54d8f71b8a95fa54ae5 /vendor/github.com/klauspost
parent    d987f26f1e2449d3237faa0b873d82ce5a89e0ee (diff)
Vendor in latest containers/storage

Fix handling of additional stores with no images.

Fixes: https://github.com/containers/storage/issues/1029

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/github.com/klauspost')
-rw-r--r--  vendor/github.com/klauspost/compress/LICENSE                               |  12
-rw-r--r--  vendor/github.com/klauspost/compress/README.md                             |  12
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go                 |  25
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go                   | 275
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go        |   1
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go  |   2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s   |  66
7 files changed, 305 insertions(+), 88 deletions(-)
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 6cd1e9627..87d557477 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -290,3 +290,15 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 235dc7cc6..3429879eb 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,12 +17,17 @@ This package provides various compression algorithms.
# changelog
+* Aug 30, 2021 (v1.13.5)
+ * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
+ * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
+ * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
+ * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
+
* Aug 12, 2021 (v1.13.4)
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
* Aug 3, 2021 (v1.13.3)
-
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
@@ -31,7 +36,6 @@ This package provides various compression algorithms.
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
* Jun 14, 2021 (v1.13.1)
-
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
@@ -64,6 +68,9 @@ This package provides various compression algorithms.
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+<details>
+ <summary>See changes prior to v1.12.1</summary>
+
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
@@ -118,6 +125,7 @@ This package provides various compression algorithms.
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+</details>
<details>
<summary>See changes prior to v1.11.0</summary>
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 347ac2c90..a746eb733 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -215,24 +215,15 @@ func (e *fastGen) Reset() {
func matchLen(a, b []byte) int {
b = b[:len(a)]
var checked int
- if len(a) >= 4 {
- // Try 4 bytes first
- if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
- return bits.TrailingZeros32(diff) >> 3
- }
- // Switch to 8 byte matching.
- checked = 4
- a = a[4:]
- b = b[4:]
- for len(a) >= 8 {
- b = b[:len(a)]
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- return checked + (bits.TrailingZeros64(diff) >> 3)
- }
- checked += 8
- a = a[8:]
- b = b[8:]
+
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
}
+ checked += 8
+ a = a[8:]
+ b = b[8:]
}
b = b[:len(a)]
for i := range a {
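The rewritten matchLen drops the separate 4-byte warm-up and compares 8 bytes at a time from the start. A minimal, self-contained sketch of the technique (hypothetical names, not the vendored code): XOR of two equal words is zero, and in the first non-zero word the count of trailing zero bits divided by 8 gives how many leading bytes of that word still matched, because little-endian loads put earlier bytes in lower bits.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLenSketch counts how many leading bytes a and b share,
// comparing eight bytes per iteration.
func matchLenSketch(a, b []byte) int {
	var n int
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// First differing byte is at trailing-zero-bits / 8.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Byte-at-a-time tail for the last <8 bytes.
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLenSketch([]byte("gopher-gopher"), []byte("gopher-gophXr"))) // 11
}
```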
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 41703bba4..9b7cc8e97 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -344,35 +344,241 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
var buf [256]byte
var off uint8
- shift := (8 - d.actualTableLog) & 7
-
- //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
- for br.off >= 4 {
- br.fillFast()
- v := dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
+ switch d.actualTableLog {
+ case 8:
+ const shift = 8 - 8
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 7:
+ const shift = 8 - 7
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 6:
+ const shift = 8 - 6
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 5:
+ const shift = 8 - 5
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 4:
+ const shift = 8 - 4
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 3:
+ const shift = 8 - 3
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 2:
+ const shift = 8 - 2
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 1:
+ const shift = 8 - 1
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
}
- dst = append(dst, buf[:]...)
}
+ default:
+ return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
@@ -383,6 +589,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
// br < 4, so uint8 is fine
bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ shift := (8 - d.actualTableLog) & 7
+
for bitsLeft > 0 {
if br.bitsRead >= 64-8 {
for br.off > 0 {
@@ -423,24 +631,24 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
var buf [256]byte
var off uint8
- const shift = 0
+ const shift = 56
//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
for br.off >= 4 {
br.fillFast()
- v := dt[br.peekByteFast()>>shift]
+ v := dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
@@ -474,7 +682,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- v := dt[br.peekByteFast()>>shift]
+ v := dt[br.peekByteFast()]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= int8(nBits)
@@ -709,7 +917,6 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
shift := (8 - d.actualTableLog) & 7
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
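Most of the huff0 hunk above is mechanical duplication with a purpose: the old loop computed `shift := (8 - d.actualTableLog) & 7` once and then paid for a variable shift on every table lookup, while the new code repeats the loop under `switch d.actualTableLog` so each copy shifts by a compile-time constant. A reduced sketch of that pattern (hypothetical functions, only two cases written out):

```go
package main

import "fmt"

// sumTopBitsGeneric models the old hot loop: shift is a runtime
// value, so every iteration performs a variable shift.
func sumTopBitsGeneric(values []uint64, tableLog uint8) (sum int) {
	shift := (8 - tableLog) & 7
	for _, v := range values {
		sum += int(uint8(v >> (56 + shift)))
	}
	return sum
}

// sumTopBitsSpecialized models the new layout: one loop copy per
// table log, each with a constant shift the compiler can emit as
// an immediate operand.
func sumTopBitsSpecialized(values []uint64, tableLog uint8) (sum int) {
	switch tableLog {
	case 8:
		const shift = 8 - 8
		for _, v := range values {
			sum += int(uint8(v >> (56 + shift)))
		}
	case 7:
		const shift = 8 - 7
		for _, v := range values {
			sum += int(uint8(v >> (56 + shift)))
		}
	// ...cases 6 through 1 repeat the same loop with their own constant...
	default:
		// Mirrors the diff: an unexpected table log is an error.
		panic(fmt.Sprintf("invalid tablelog: %d", tableLog))
	}
	return sum
}

func main() {
	vals := []uint64{0xfedcba9876543210, 0x0123456789abcdef}
	fmt.Println(sumTopBitsGeneric(vals, 7) == sumTopBitsSpecialized(vals, 7)) // true
}
```

The trade-off is code size: eight near-identical loop bodies in exchange for removing a variable shift from the hottest path.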
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 426b9cac7..2c112a0ab 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
- b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
index 3ddbd5c0b..0ae847f75 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -9,4 +9,4 @@ package xxhash
func Sum64(b []byte) uint64
//go:noescape
-func writeBlocks(*Digest, []byte) int
+func writeBlocks(d *Digest, b []byte) int
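This one-line change has a concrete payoff: naming the stub's parameters lets the hand-written assembly address them symbolically, and go vet's asmdecl check can then verify the frame offsets against this signature. The renames from `arg+0(FP)`/`arg1_base+8(FP)` to `d+0(FP)`/`b_base+8(FP)` in the .s hunk below depend on it. The declaration as it reads after this change:

```go
// Declared in Go, implemented in xxhash_amd64.s. With named
// parameters, the assembly refers to d+0(FP), b_base+8(FP), and
// b_len+16(FP); the old unnamed form forced the auto-generated
// arg+0(FP) and arg1_base+8(FP) names instead.
//
//go:noescape
func writeBlocks(d *Digest, b []byte) int
```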
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index 2c9c5357a..be8db5bf7 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -6,7 +6,7 @@
// Register allocation:
// AX h
-// CX pointer to advance through b
+// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
-// R15 prime4v
+// DI prime4v
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
+ MOVQ (SI), R12 \
+ ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
- ADDQ R15, acc
+ ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
+ MOVQ ·prime4v(SB), DI
// Load slice.
- MOVQ b_base+0(FP), CX
+ MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
+ LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
- // Loop until CX > BX.
+ // Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
+ MOVQ (SI), R8
+ ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
- ADDQ R15, AX
+ ADDQ DI, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG singles
- MOVL (CX), R8
- ADDQ $4, CX
+ MOVL (SI), R8
+ ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JGE finalize
singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
+ MOVBQZX (SI), R12
+ ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JL singlesLoop
finalize:
@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
- MOVQ arg1_base+8(FP), CX
- MOVQ arg1_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
+ MOVQ b_base+8(FP), SI
+ MOVQ b_len+16(FP), DX
+ LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
- MOVQ arg+0(FP), AX
+ MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
@@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
- // The number of bytes written is CX minus the old base pointer.
- SUBQ arg1_base+8(FP), CX
- MOVQ CX, ret+32(FP)
+ // The number of bytes written is SI minus the old base pointer.
+ SUBQ b_base+8(FP), SI
+ MOVQ SI, ret+32(FP)
RET
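For readers who do not speak Plan 9 assembly: writeBlocks consumes whole 32-byte blocks from b into the four xxhash64 lanes and reports how many bytes it used, which is exactly what blockLoop above does with round() on R8–R11. A hedged pure-Go sketch of that contract, with the lane state simplified to a [4]uint64 (the real package keeps it in Digest fields):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// xxhash64 lane primes; these match the ·prime1v/·prime2v values
// the assembly loads into R13/R14.
const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
)

// round is the Go equivalent of the round() macro: mix one 8-byte
// word into an accumulator.
func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = bits.RotateLeft64(acc, 31)
	return acc * prime1
}

// writeBlocksGo mirrors the assembly's contract: consume whole
// 32-byte blocks from b into the four lanes, return bytes consumed.
func writeBlocksGo(v *[4]uint64, b []byte) int {
	n := len(b)
	for len(b) >= 32 {
		v[0] = round(v[0], binary.LittleEndian.Uint64(b[0:8]))
		v[1] = round(v[1], binary.LittleEndian.Uint64(b[8:16]))
		v[2] = round(v[2], binary.LittleEndian.Uint64(b[16:24]))
		v[3] = round(v[3], binary.LittleEndian.Uint64(b[24:32]))
		b = b[32:]
	}
	return n - len(b)
}

func main() {
	var seed uint64 // zero seed; kept as a variable so the lane math wraps at runtime
	v := [4]uint64{seed + prime1 + prime2, seed + prime2, seed, seed - prime1}
	buf := make([]byte, 64)
	for i := range buf {
		buf[i] = byte(i)
	}
	fmt.Println(writeBlocksGo(&v, buf)) // 64: two full blocks consumed
}
```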