path: root/vendor/github.com/klauspost/compress/huff0
author	Miloslav Trmač <mitr@redhat.com>	2019-10-01 22:15:58 +0200
committer	Miloslav Trmač <mitr@redhat.com>	2019-10-04 20:18:23 +0200
commit	d3f59bedb393521986e645bc48c47938f321b643 (patch)
tree	c61aa40e008b7fcb371d899880a4afd1714f50af /vendor/github.com/klauspost/compress/huff0
parent	bd08fc0e9b3a9943008585879877b68789e38c31 (diff)
Update c/image to v4.0.1 and buildah to 1.11.3
This requires updating all import paths throughout, and a matching buildah update to interoperate.

I can't figure out why go.mod tracks github.com/containers/image v3.0.2+incompatible // indirect: (go mod graph) lists it as a direct dependency of libpod, while (go list -json -m all) lists it as an indirect dependency. Judging by the vendor subdirectory, though, it does not seem to be actually used in the built binaries.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Diffstat (limited to 'vendor/github.com/klauspost/compress/huff0')
-rw-r--r--	vendor/github.com/klauspost/compress/huff0/decompress.go	31
-rw-r--r--	vendor/github.com/klauspost/compress/huff0/huff0.go	11
2 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 261c54274..43b4815b3 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -193,14 +193,26 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
 		tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
 		off += 4
 		if off == 0 {
+			if len(s.Out)+256 > s.MaxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
 			s.Out = append(s.Out, tmp...)
 		}
 	}
 
+	if len(s.Out)+int(off) > s.MaxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	s.Out = append(s.Out, tmp[:off]...)
 
 	for !br.finished() {
 		br.fill()
+		if len(s.Out) >= s.MaxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
 		s.Out = append(s.Out, decode())
 	}
 	return s.Out, br.close()
@@ -218,6 +230,9 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	if len(in) < 6+(4*1) {
 		return nil, errors.New("input too small")
 	}
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	// TODO: We do not detect when we overrun a buffer, except if the last one does.
 
 	var br [4]bitReader
@@ -247,9 +262,13 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	dstOut := s.Out
 	dstEvery := (dstSize + 3) / 4
 
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := s.dt.single[:tlSize]
+
 	decode := func(br *bitReader) byte {
 		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
-		v := s.dt.single[val]
+		v := single[val&tlMask]
 		br.bitsRead += v.nBits
 		return v.byte
 	}
@@ -279,7 +298,7 @@ bigloop:
 		off += 2
 		if off == bufoff {
 			if bufoff > dstEvery {
-				return nil, errors.New("corruption detected: stream overrun")
+				return nil, errors.New("corruption detected: stream overrun 1")
 			}
 			copy(dstOut, tmp[:bufoff])
 			copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
@@ -288,15 +307,15 @@ bigloop:
 			off = 0
 			dstOut = dstOut[bufoff:]
 			// There must at least be 3 buffers left.
-			if len(dstOut) < dstEvery*3+3 {
-				return nil, errors.New("corruption detected: stream overrun")
+			if len(dstOut) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
 	}
 	if off > 0 {
 		ioff := int(off)
 		if len(dstOut) < dstEvery*3+ioff {
-			return nil, errors.New("corruption detected: stream overrun")
+			return nil, errors.New("corruption detected: stream overrun 3")
 		}
 		copy(dstOut, tmp[:off])
 		copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
@@ -311,7 +330,7 @@ bigloop:
 		for !br.finished() {
 			br.fill()
 			if offset >= len(dstOut) {
-				return nil, errors.New("corruption detected: stream overrun")
+				return nil, errors.New("corruption detected: stream overrun 4")
 			}
 			dstOut[offset] = decode(br)
 			offset++
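
For context, the decompress.go changes above are caller-visible mainly through Decompress4X's new up-front check: a dstSize larger than Scratch.MaxDecodedSize now fails immediately with ErrMaxDecodedSizeExceeded rather than being decoded. A minimal sketch of that behaviour, assuming a Scratch whose table was already loaded (for example via huff0.ReadTable); the package and helper names are illustrative, not part of this change:

package huff0cap

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decode4X decodes a four-stream huff0 block whose decompression table has
// already been loaded into s. With the added check, a dstSize above
// s.MaxDecodedSize is rejected before any stream is decoded.
func decode4X(s *huff0.Scratch, data []byte, dstSize int) ([]byte, error) {
	out, err := s.Decompress4X(data, dstSize)
	if err == huff0.ErrMaxDecodedSizeExceeded {
		return nil, fmt.Errorf("block declares %d decompressed bytes, above the configured limit", dstSize)
	}
	return out, err
}
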
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 50d02e440..6f823f94d 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -35,6 +35,9 @@ var (
 
 	// ErrTooBig is returned if the input is too large for a single block.
 	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
 )
 
 type ReusePolicy uint8
@@ -86,6 +89,11 @@ type Scratch struct {
 	// Reuse will specify the reuse policy
 	Reuse ReusePolicy
 
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
 	br        byteReader
 	symbolLen uint16 // Length of active part of the symbol table.
 	maxCount  int    // count of the most probable symbol
@@ -116,6 +124,9 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.TableLog > tableLogMax {
 		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
 	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
 	if s.clearCount && s.maxCount == 0 {
 		for i := range s.count {
 			s.count[i] = 0
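
To round off the huff0.go side, a hypothetical end-to-end sketch of how a caller can opt into the new limit; the package name, the decodeCapped helper, and the single-stream block layout (serialized table followed by compressed payload, which is what ReadTable expects) are assumptions for illustration, not part of this change:

package huff0cap

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decodeCapped decompresses a single-stream huff0 block while refusing to
// produce more than maxOut bytes of output.
func decodeCapped(block []byte, maxOut int) ([]byte, error) {
	// prepare() clamps values <= 0 or > BlockSizeMax back to BlockSizeMax,
	// so the zero value keeps the previous behaviour.
	s := &huff0.Scratch{MaxDecodedSize: maxOut}

	// ReadTable parses the Huffman table from the front of the block and
	// returns the remaining compressed payload.
	s, remain, err := huff0.ReadTable(block, s)
	if err != nil {
		return nil, err
	}

	out, err := s.Decompress1X(remain)
	if err == huff0.ErrMaxDecodedSizeExceeded {
		return nil, fmt.Errorf("refusing block: decoded output would exceed %d bytes", maxOut)
	}
	return out, err
}

Since prepare() applies the default, existing callers that never set MaxDecodedSize keep the old behaviour of allowing up to BlockSizeMax per block.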