| | | |
|---|---|---|
| author | Daniel J Walsh <dwalsh@redhat.com> | 2020-09-10 09:31:28 -0400 |
| committer | Daniel J Walsh <dwalsh@redhat.com> | 2020-09-10 09:34:31 -0400 |
| commit | 98620c56d6ad0d896308a4e5858c5d1913476eaf (patch) | |
| tree | df24f9bb53b8b455cf79cb57594987c7c0054b16 /vendor/github.com/klauspost/compress/flate | |
| parent | 08b602043ec07601b9be23c449c7773067683d90 (diff) | |
| download | podman-98620c56d6ad0d896308a4e5858c5d1913476eaf.tar.gz podman-98620c56d6ad0d896308a4e5858c5d1913476eaf.tar.bz2 podman-98620c56d6ad0d896308a4e5858c5d1913476eaf.zip | |
vendor containers/storage v1.23.5
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor/github.com/klauspost/compress/flate')
7 files changed, 190 insertions, 102 deletions
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 6d4c1e98b..4a73e1bdd 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -127,7 +127,7 @@ func (e *fastGen) addBlock(src []byte) int32 { // hash4 returns the hash of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <32. func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) + return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) } type tableEntryPrev struct { @@ -138,25 +138,25 @@ type tableEntryPrev struct { // hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <32. func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) + return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32) } // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) } // hash8 returns the hash of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) + return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64)) } // hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64)) } // matchlen will return the match length between offsets and t in src. 
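The fast_encoder.go hunk above only swaps the hard-coded shift masks (`& 31`, `& 63`) in the hash helpers for the named `reg8SizeMask32`/`reg8SizeMask64` constants added later in this commit. On amd64 those constants keep the values 31 and 63, matching the hardware's implicit "shift count mod register size", so behavior is unchanged; on other architectures they become full-byte masks and the `&` is a no-op. A minimal, hypothetical sketch of the pattern (stand-in multiplier, not the package's real `prime8bytes`):

```go
// Standalone illustration of the masked-shift hashing pattern from the
// diff above; hypothetical demo, not the vendored code.
package main

import "fmt"

// 63 on amd64 (the diff's reg8SizeMask64); the non-amd64 build defines an
// all-ones mask instead, so the & below becomes a no-op there.
const reg8SizeMask64 = 63

// hash8Like hashes u into a table with h bits; h is assumed to be < 64,
// so masking the shift count never changes the result.
func hash8Like(u uint64, h uint8) uint32 {
	const prime = 0x9E3779B97F4A7C15 // stand-in odd constant, not prime8bytes
	return uint32((u * prime) >> ((64 - h) & reg8SizeMask64))
}

func main() {
	fmt.Printf("%#x\n", hash8Like(0x0102030405060708, 17))
}
```

Because the shift count is computed at run time, Go normally has to guard against counts of 64 or more; masking the count lets the amd64 backend drop that guard and emit the bare shift instruction.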
diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go index c74a95fe7..b26d19ec2 100644 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -85,7 +85,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -104,7 +104,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -167,15 +167,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -183,17 +183,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -202,9 +204,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -212,10 +214,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -225,7 +227,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -233,7 +235,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 53fe1d06e..208d66711 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -206,7 +206,7 @@ func (w *huffmanBitWriter) write(b []byte) { } func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << (w.nbits & 63) + w.bits |= uint64(b) << (w.nbits & reg16SizeMask64) w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := lengths[lengthCode&31] - w.bits |= uint64(c.code) << (w.nbits & 63) + w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := offs[offsetCode&31] - w.bits |= uint64(c.code) << (w.nbits & 63) + w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -878,7 +878,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << ((w.nbits) & 63) + w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { bits := w.bits diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 3e4259f15..189e9fe0b 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -522,8 +522,8 @@ func (f *decompressor) readHuffman() error { return err } } - rep += int(f.b & uint32(1<<nb-1)) - f.b >>= nb + rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1)) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb if i+rep > n { if debugDecode { @@ -603,7 +603,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -622,7 +622,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -685,12 +685,12 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { if err = f.moreBits(); err != nil { @@ -701,17 +701,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -720,7 +722,7 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { if err = f.moreBits(); err != nil { if debugDecode { @@ -730,10 +732,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -743,7 +745,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -751,7 +753,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -869,7 +871,7 @@ func (f *decompressor) moreBits() error { return noEOF(err) } f.roffset++ - f.b |= uint32(c) << f.nb + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) f.nb += 8 return nil } @@ -894,7 +896,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { return 0, noEOF(err) } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := h.chunks[b&(huffmanNumChunks-1)] @@ -913,7 +915,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { f.err = CorruptInputError(f.roffset) return 0, f.err } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n return int(chunk >> huffmanValueShift), nil } diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index 397dc1b1a..9a92a1b30 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -63,7 +63,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -82,7 +82,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -145,15 +145,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -161,17 +161,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -180,9 +182,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -190,10 +192,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -203,7 +205,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -211,7 +213,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -287,7 +289,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -306,7 +308,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -369,15 +371,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -385,17 +387,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -404,9 +408,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -414,10 +418,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -427,7 +431,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -435,7 +439,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -511,7 +515,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -530,7 +534,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -593,15 +597,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -609,17 +613,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -628,9 +634,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -638,10 +644,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -651,7 +657,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -659,7 +665,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -735,7 +741,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -754,7 +760,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -817,15 +823,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -833,17 +839,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -852,9 +860,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -862,10 +870,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -875,7 +883,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -883,7 +891,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 000000000..6ed28061b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size.
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 000000000..f477a5d6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,39 @@ +//+build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +)
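The two new regmask files give the same constant names architecture-specific values: on amd64 each mask is the register width minus one (mirroring how x86 truncates shift counts), while the `!amd64` build defines all-ones masks so the `&` operations are no-ops. Either way the masked shifts in the hunks above behave exactly like the unmasked originals, because the shift counts are always smaller than the operand width; the mask only tells the compiler that, letting the amd64 backend skip the oversized-shift guard. A rough, hypothetical sketch of the bit-buffer pattern the inflate hunks follow (assumed names, not the vendored decompressor):

```go
// Hypothetical bit-buffer sketch mirroring the masked-shift pattern in the
// inflate hunks above; not the vendored implementation.
package main

import "fmt"

// 31 on amd64 (the diff's regSizeMaskUint32); the non-amd64 build uses
// ^uint(0), turning the masks below into no-ops.
const regSizeMaskUint32 = 31

type bitReader struct {
	b  uint32 // bit buffer, LSB first
	nb uint   // number of valid bits in b
}

// readBits consumes and returns the low n bits of the buffer.
// n is always well below 32 in this pattern, so masking it is harmless.
func (r *bitReader) readBits(n uint) uint32 {
	v := r.b & uint32(1<<(n&regSizeMaskUint32)-1)
	r.b >>= n & regSizeMaskUint32
	r.nb -= n
	return v
}

func main() {
	r := &bitReader{b: 0b10110110, nb: 8}
	fmt.Println(r.readBits(3), r.readBits(5)) // prints: 6 22
}
```

On non-amd64 targets the shared flate code compiles unchanged; only the constant values differ between the two files.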